From 15e7e5b10864fb5be5ba53304f2031771c349bfd Mon Sep 17 00:00:00 2001 From: Blake Devcich Date: Wed, 13 Nov 2024 15:45:18 -0600 Subject: [PATCH 01/23] CRDBUMPER-create-apis Create v1alpha4 APIs. This used "kubebuilder create api --resource --controller=false" for each API. Signed-off-by: Blake Devcich --- PROJECT | 114 +- api/v1alpha4/groupversion_info.go | 39 + api/v1alpha4/nnfaccess_types.go | 67 + api/v1alpha4/nnfcontainerprofile_types.go | 67 + api/v1alpha4/nnfdatamovement_types.go | 67 + api/v1alpha4/nnfdatamovementmanager_types.go | 67 + api/v1alpha4/nnfdatamovementprofile_types.go | 67 + api/v1alpha4/nnflustremgt_types.go | 67 + api/v1alpha4/nnfnode_types.go | 67 + api/v1alpha4/nnfnodeblockstorage_types.go | 67 + api/v1alpha4/nnfnodeecdata_types.go | 67 + api/v1alpha4/nnfnodestorage_types.go | 67 + api/v1alpha4/nnfportmanager_types.go | 67 + api/v1alpha4/nnfstorage_types.go | 67 + api/v1alpha4/nnfstorageprofile_types.go | 67 + api/v1alpha4/nnfsystemstorage_types.go | 67 + api/v1alpha4/zz_generated.deepcopy.go | 1274 +++++++++++++++++ cmd/main.go | 2 + config/samples/kustomization.yaml | 14 + config/samples/nnf_v1alpha4_nnfaccess.yaml | 9 + .../nnf_v1alpha4_nnfcontainerprofile.yaml | 9 + .../samples/nnf_v1alpha4_nnfdatamovement.yaml | 9 + .../nnf_v1alpha4_nnfdatamovementmanager.yaml | 9 + .../nnf_v1alpha4_nnfdatamovementprofile.yaml | 9 + config/samples/nnf_v1alpha4_nnflustremgt.yaml | 9 + config/samples/nnf_v1alpha4_nnfnode.yaml | 9 + .../nnf_v1alpha4_nnfnodeblockstorage.yaml | 9 + .../samples/nnf_v1alpha4_nnfnodeecdata.yaml | 9 + .../samples/nnf_v1alpha4_nnfnodestorage.yaml | 9 + .../samples/nnf_v1alpha4_nnfportmanager.yaml | 9 + config/samples/nnf_v1alpha4_nnfstorage.yaml | 9 + .../nnf_v1alpha4_nnfstorageprofile.yaml | 9 + .../nnf_v1alpha4_nnfsystemstorage.yaml | 9 + 33 files changed, 2506 insertions(+), 1 deletion(-) create mode 100644 api/v1alpha4/groupversion_info.go create mode 100644 api/v1alpha4/nnfaccess_types.go create mode 100644 
api/v1alpha4/nnfcontainerprofile_types.go create mode 100644 api/v1alpha4/nnfdatamovement_types.go create mode 100644 api/v1alpha4/nnfdatamovementmanager_types.go create mode 100644 api/v1alpha4/nnfdatamovementprofile_types.go create mode 100644 api/v1alpha4/nnflustremgt_types.go create mode 100644 api/v1alpha4/nnfnode_types.go create mode 100644 api/v1alpha4/nnfnodeblockstorage_types.go create mode 100644 api/v1alpha4/nnfnodeecdata_types.go create mode 100644 api/v1alpha4/nnfnodestorage_types.go create mode 100644 api/v1alpha4/nnfportmanager_types.go create mode 100644 api/v1alpha4/nnfstorage_types.go create mode 100644 api/v1alpha4/nnfstorageprofile_types.go create mode 100644 api/v1alpha4/nnfsystemstorage_types.go create mode 100644 api/v1alpha4/zz_generated.deepcopy.go create mode 100644 config/samples/nnf_v1alpha4_nnfaccess.yaml create mode 100644 config/samples/nnf_v1alpha4_nnfcontainerprofile.yaml create mode 100644 config/samples/nnf_v1alpha4_nnfdatamovement.yaml create mode 100644 config/samples/nnf_v1alpha4_nnfdatamovementmanager.yaml create mode 100644 config/samples/nnf_v1alpha4_nnfdatamovementprofile.yaml create mode 100644 config/samples/nnf_v1alpha4_nnflustremgt.yaml create mode 100644 config/samples/nnf_v1alpha4_nnfnode.yaml create mode 100644 config/samples/nnf_v1alpha4_nnfnodeblockstorage.yaml create mode 100644 config/samples/nnf_v1alpha4_nnfnodeecdata.yaml create mode 100644 config/samples/nnf_v1alpha4_nnfnodestorage.yaml create mode 100644 config/samples/nnf_v1alpha4_nnfportmanager.yaml create mode 100644 config/samples/nnf_v1alpha4_nnfstorage.yaml create mode 100644 config/samples/nnf_v1alpha4_nnfstorageprofile.yaml create mode 100644 config/samples/nnf_v1alpha4_nnfsystemstorage.yaml diff --git a/PROJECT b/PROJECT index 35efadda..075ada57 100644 --- a/PROJECT +++ b/PROJECT @@ -409,4 +409,116 @@ resources: webhooks: conversion: true webhookVersion: v1 -version: '3' +- api: + crdVersion: v1 + namespaced: true + domain: cray.hpe.com + group: nnf 
+ kind: NnfAccess + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 + version: v1alpha4 +- api: + crdVersion: v1 + namespaced: true + domain: cray.hpe.com + group: nnf + kind: NnfContainerProfile + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 + version: v1alpha4 +- api: + crdVersion: v1 + namespaced: true + domain: cray.hpe.com + group: nnf + kind: NnfDataMovement + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 + version: v1alpha4 +- api: + crdVersion: v1 + namespaced: true + domain: cray.hpe.com + group: nnf + kind: NnfDataMovementManager + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 + version: v1alpha4 +- api: + crdVersion: v1 + namespaced: true + domain: cray.hpe.com + group: nnf + kind: NnfDataMovementProfile + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 + version: v1alpha4 +- api: + crdVersion: v1 + namespaced: true + domain: cray.hpe.com + group: nnf + kind: NnfLustreMGT + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 + version: v1alpha4 +- api: + crdVersion: v1 + namespaced: true + domain: cray.hpe.com + group: nnf + kind: NnfNode + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 + version: v1alpha4 +- api: + crdVersion: v1 + namespaced: true + domain: cray.hpe.com + group: nnf + kind: NnfNodeBlockStorage + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 + version: v1alpha4 +- api: + crdVersion: v1 + namespaced: true + domain: cray.hpe.com + group: nnf + kind: NnfNodeECData + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 + version: v1alpha4 +- api: + crdVersion: v1 + namespaced: true + domain: cray.hpe.com + group: nnf + kind: NnfNodeStorage + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 + version: v1alpha4 +- api: + crdVersion: v1 + namespaced: true + domain: cray.hpe.com + group: nnf + kind: NnfPortManager + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 + version: v1alpha4 +- api: + crdVersion: v1 + namespaced: true + domain: cray.hpe.com + group: nnf + kind: NnfStorage + path: 
github.com/NearNodeFlash/nnf-sos/api/v1alpha4 + version: v1alpha4 +- api: + crdVersion: v1 + namespaced: true + domain: cray.hpe.com + group: nnf + kind: NnfStorageProfile + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 + version: v1alpha4 +- api: + crdVersion: v1 + namespaced: true + domain: cray.hpe.com + group: nnf + kind: NnfSystemStorage + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 + version: v1alpha4 +version: "3" diff --git a/api/v1alpha4/groupversion_info.go b/api/v1alpha4/groupversion_info.go new file mode 100644 index 00000000..398f8558 --- /dev/null +++ b/api/v1alpha4/groupversion_info.go @@ -0,0 +1,39 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// Package v1alpha4 contains API Schema definitions for the nnf v1alpha4 API group +// +kubebuilder:object:generate=true +// +groupName=nnf.cray.hpe.com +package v1alpha4 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "nnf.cray.hpe.com", Version: "v1alpha4"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/v1alpha4/nnfaccess_types.go b/api/v1alpha4/nnfaccess_types.go new file mode 100644 index 00000000..d9745d24 --- /dev/null +++ b/api/v1alpha4/nnfaccess_types.go @@ -0,0 +1,67 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
+ +// NnfAccessSpec defines the desired state of NnfAccess +type NnfAccessSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Foo is an example field of NnfAccess. Edit nnfaccess_types.go to remove/update + Foo string `json:"foo,omitempty"` +} + +// NnfAccessStatus defines the observed state of NnfAccess +type NnfAccessStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// NnfAccess is the Schema for the nnfaccesses API +type NnfAccess struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NnfAccessSpec `json:"spec,omitempty"` + Status NnfAccessStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// NnfAccessList contains a list of NnfAccess +type NnfAccessList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfAccess `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NnfAccess{}, &NnfAccessList{}) +} diff --git a/api/v1alpha4/nnfcontainerprofile_types.go b/api/v1alpha4/nnfcontainerprofile_types.go new file mode 100644 index 00000000..3bab739d --- /dev/null +++ b/api/v1alpha4/nnfcontainerprofile_types.go @@ -0,0 +1,67 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
+ * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// NnfContainerProfileSpec defines the desired state of NnfContainerProfile +type NnfContainerProfileSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Foo is an example field of NnfContainerProfile. Edit nnfcontainerprofile_types.go to remove/update + Foo string `json:"foo,omitempty"` +} + +// NnfContainerProfileStatus defines the observed state of NnfContainerProfile +type NnfContainerProfileStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// NnfContainerProfile is the Schema for the nnfcontainerprofiles API +type NnfContainerProfile struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NnfContainerProfileSpec `json:"spec,omitempty"` + Status NnfContainerProfileStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// NnfContainerProfileList contains a list of NnfContainerProfile +type NnfContainerProfileList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfContainerProfile 
`json:"items"` +} + +func init() { + SchemeBuilder.Register(&NnfContainerProfile{}, &NnfContainerProfileList{}) +} diff --git a/api/v1alpha4/nnfdatamovement_types.go b/api/v1alpha4/nnfdatamovement_types.go new file mode 100644 index 00000000..6edcc131 --- /dev/null +++ b/api/v1alpha4/nnfdatamovement_types.go @@ -0,0 +1,67 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// NnfDataMovementSpec defines the desired state of NnfDataMovement +type NnfDataMovementSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Foo is an example field of NnfDataMovement. 
Edit nnfdatamovement_types.go to remove/update + Foo string `json:"foo,omitempty"` +} + +// NnfDataMovementStatus defines the observed state of NnfDataMovement +type NnfDataMovementStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// NnfDataMovement is the Schema for the nnfdatamovements API +type NnfDataMovement struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NnfDataMovementSpec `json:"spec,omitempty"` + Status NnfDataMovementStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// NnfDataMovementList contains a list of NnfDataMovement +type NnfDataMovementList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfDataMovement `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NnfDataMovement{}, &NnfDataMovementList{}) +} diff --git a/api/v1alpha4/nnfdatamovementmanager_types.go b/api/v1alpha4/nnfdatamovementmanager_types.go new file mode 100644 index 00000000..3064dbf0 --- /dev/null +++ b/api/v1alpha4/nnfdatamovementmanager_types.go @@ -0,0 +1,67 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// NnfDataMovementManagerSpec defines the desired state of NnfDataMovementManager +type NnfDataMovementManagerSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Foo is an example field of NnfDataMovementManager. Edit nnfdatamovementmanager_types.go to remove/update + Foo string `json:"foo,omitempty"` +} + +// NnfDataMovementManagerStatus defines the observed state of NnfDataMovementManager +type NnfDataMovementManagerStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// NnfDataMovementManager is the Schema for the nnfdatamovementmanagers API +type NnfDataMovementManager struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NnfDataMovementManagerSpec `json:"spec,omitempty"` + Status NnfDataMovementManagerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// NnfDataMovementManagerList contains a list of NnfDataMovementManager +type NnfDataMovementManagerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfDataMovementManager `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NnfDataMovementManager{}, &NnfDataMovementManagerList{}) +} diff --git a/api/v1alpha4/nnfdatamovementprofile_types.go b/api/v1alpha4/nnfdatamovementprofile_types.go new file mode 100644 index 00000000..ce1cfb09 --- 
/dev/null +++ b/api/v1alpha4/nnfdatamovementprofile_types.go @@ -0,0 +1,67 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// NnfDataMovementProfileSpec defines the desired state of NnfDataMovementProfile +type NnfDataMovementProfileSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Foo is an example field of NnfDataMovementProfile. 
Edit nnfdatamovementprofile_types.go to remove/update + Foo string `json:"foo,omitempty"` +} + +// NnfDataMovementProfileStatus defines the observed state of NnfDataMovementProfile +type NnfDataMovementProfileStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// NnfDataMovementProfile is the Schema for the nnfdatamovementprofiles API +type NnfDataMovementProfile struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NnfDataMovementProfileSpec `json:"spec,omitempty"` + Status NnfDataMovementProfileStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// NnfDataMovementProfileList contains a list of NnfDataMovementProfile +type NnfDataMovementProfileList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfDataMovementProfile `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NnfDataMovementProfile{}, &NnfDataMovementProfileList{}) +} diff --git a/api/v1alpha4/nnflustremgt_types.go b/api/v1alpha4/nnflustremgt_types.go new file mode 100644 index 00000000..4143f680 --- /dev/null +++ b/api/v1alpha4/nnflustremgt_types.go @@ -0,0 +1,67 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// NnfLustreMGTSpec defines the desired state of NnfLustreMGT +type NnfLustreMGTSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Foo is an example field of NnfLustreMGT. Edit nnflustremgt_types.go to remove/update + Foo string `json:"foo,omitempty"` +} + +// NnfLustreMGTStatus defines the observed state of NnfLustreMGT +type NnfLustreMGTStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// NnfLustreMGT is the Schema for the nnflustremgts API +type NnfLustreMGT struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NnfLustreMGTSpec `json:"spec,omitempty"` + Status NnfLustreMGTStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// NnfLustreMGTList contains a list of NnfLustreMGT +type NnfLustreMGTList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfLustreMGT `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NnfLustreMGT{}, &NnfLustreMGTList{}) +} diff --git a/api/v1alpha4/nnfnode_types.go b/api/v1alpha4/nnfnode_types.go new file mode 100644 index 00000000..85739d26 --- /dev/null +++ b/api/v1alpha4/nnfnode_types.go @@ -0,0 +1,67 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. 
+ * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// NnfNodeSpec defines the desired state of NnfNode +type NnfNodeSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Foo is an example field of NnfNode. 
Edit nnfnode_types.go to remove/update + Foo string `json:"foo,omitempty"` +} + +// NnfNodeStatus defines the observed state of NnfNode +type NnfNodeStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// NnfNode is the Schema for the nnfnodes API +type NnfNode struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NnfNodeSpec `json:"spec,omitempty"` + Status NnfNodeStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// NnfNodeList contains a list of NnfNode +type NnfNodeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfNode `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NnfNode{}, &NnfNodeList{}) +} diff --git a/api/v1alpha4/nnfnodeblockstorage_types.go b/api/v1alpha4/nnfnodeblockstorage_types.go new file mode 100644 index 00000000..13c80153 --- /dev/null +++ b/api/v1alpha4/nnfnodeblockstorage_types.go @@ -0,0 +1,67 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! 
THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// NnfNodeBlockStorageSpec defines the desired state of NnfNodeBlockStorage +type NnfNodeBlockStorageSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Foo is an example field of NnfNodeBlockStorage. Edit nnfnodeblockstorage_types.go to remove/update + Foo string `json:"foo,omitempty"` +} + +// NnfNodeBlockStorageStatus defines the observed state of NnfNodeBlockStorage +type NnfNodeBlockStorageStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// NnfNodeBlockStorage is the Schema for the nnfnodeblockstorages API +type NnfNodeBlockStorage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NnfNodeBlockStorageSpec `json:"spec,omitempty"` + Status NnfNodeBlockStorageStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// NnfNodeBlockStorageList contains a list of NnfNodeBlockStorage +type NnfNodeBlockStorageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfNodeBlockStorage `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NnfNodeBlockStorage{}, &NnfNodeBlockStorageList{}) +} diff --git a/api/v1alpha4/nnfnodeecdata_types.go b/api/v1alpha4/nnfnodeecdata_types.go new file mode 100644 index 00000000..0dc8c19e --- /dev/null +++ b/api/v1alpha4/nnfnodeecdata_types.go @@ -0,0 +1,67 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. 
+ * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// NnfNodeECDataSpec defines the desired state of NnfNodeECData +type NnfNodeECDataSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Foo is an example field of NnfNodeECData. 
Edit nnfnodeecdata_types.go to remove/update + Foo string `json:"foo,omitempty"` +} + +// NnfNodeECDataStatus defines the observed state of NnfNodeECData +type NnfNodeECDataStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// NnfNodeECData is the Schema for the nnfnodeecdata API +type NnfNodeECData struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NnfNodeECDataSpec `json:"spec,omitempty"` + Status NnfNodeECDataStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// NnfNodeECDataList contains a list of NnfNodeECData +type NnfNodeECDataList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfNodeECData `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NnfNodeECData{}, &NnfNodeECDataList{}) +} diff --git a/api/v1alpha4/nnfnodestorage_types.go b/api/v1alpha4/nnfnodestorage_types.go new file mode 100644 index 00000000..28fa0856 --- /dev/null +++ b/api/v1alpha4/nnfnodestorage_types.go @@ -0,0 +1,67 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v1alpha4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// NnfNodeStorageSpec defines the desired state of NnfNodeStorage +type NnfNodeStorageSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Foo is an example field of NnfNodeStorage. Edit nnfnodestorage_types.go to remove/update + Foo string `json:"foo,omitempty"` +} + +// NnfNodeStorageStatus defines the observed state of NnfNodeStorage +type NnfNodeStorageStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// NnfNodeStorage is the Schema for the nnfnodestorages API +type NnfNodeStorage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NnfNodeStorageSpec `json:"spec,omitempty"` + Status NnfNodeStorageStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// NnfNodeStorageList contains a list of NnfNodeStorage +type NnfNodeStorageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfNodeStorage `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NnfNodeStorage{}, &NnfNodeStorageList{}) +} diff --git a/api/v1alpha4/nnfportmanager_types.go b/api/v1alpha4/nnfportmanager_types.go new file mode 100644 index 00000000..6f00db0e --- /dev/null +++ b/api/v1alpha4/nnfportmanager_types.go @@ -0,0 +1,67 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. 
+ * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// NnfPortManagerSpec defines the desired state of NnfPortManager +type NnfPortManagerSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Foo is an example field of NnfPortManager. 
Edit nnfportmanager_types.go to remove/update + Foo string `json:"foo,omitempty"` +} + +// NnfPortManagerStatus defines the observed state of NnfPortManager +type NnfPortManagerStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// NnfPortManager is the Schema for the nnfportmanagers API +type NnfPortManager struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NnfPortManagerSpec `json:"spec,omitempty"` + Status NnfPortManagerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// NnfPortManagerList contains a list of NnfPortManager +type NnfPortManagerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfPortManager `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NnfPortManager{}, &NnfPortManagerList{}) +} diff --git a/api/v1alpha4/nnfstorage_types.go b/api/v1alpha4/nnfstorage_types.go new file mode 100644 index 00000000..e4be22b3 --- /dev/null +++ b/api/v1alpha4/nnfstorage_types.go @@ -0,0 +1,67 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v1alpha4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// NnfStorageSpec defines the desired state of NnfStorage +type NnfStorageSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Foo is an example field of NnfStorage. Edit nnfstorage_types.go to remove/update + Foo string `json:"foo,omitempty"` +} + +// NnfStorageStatus defines the observed state of NnfStorage +type NnfStorageStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// NnfStorage is the Schema for the nnfstorages API +type NnfStorage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NnfStorageSpec `json:"spec,omitempty"` + Status NnfStorageStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// NnfStorageList contains a list of NnfStorage +type NnfStorageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfStorage `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NnfStorage{}, &NnfStorageList{}) +} diff --git a/api/v1alpha4/nnfstorageprofile_types.go b/api/v1alpha4/nnfstorageprofile_types.go new file mode 100644 index 00000000..318a115f --- /dev/null +++ b/api/v1alpha4/nnfstorageprofile_types.go @@ -0,0 +1,67 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. 
+ * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// NnfStorageProfileSpec defines the desired state of NnfStorageProfile +type NnfStorageProfileSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Foo is an example field of NnfStorageProfile. 
Edit nnfstorageprofile_types.go to remove/update + Foo string `json:"foo,omitempty"` +} + +// NnfStorageProfileStatus defines the observed state of NnfStorageProfile +type NnfStorageProfileStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// NnfStorageProfile is the Schema for the nnfstorageprofiles API +type NnfStorageProfile struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NnfStorageProfileSpec `json:"spec,omitempty"` + Status NnfStorageProfileStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// NnfStorageProfileList contains a list of NnfStorageProfile +type NnfStorageProfileList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfStorageProfile `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NnfStorageProfile{}, &NnfStorageProfileList{}) +} diff --git a/api/v1alpha4/nnfsystemstorage_types.go b/api/v1alpha4/nnfsystemstorage_types.go new file mode 100644 index 00000000..05e9fe35 --- /dev/null +++ b/api/v1alpha4/nnfsystemstorage_types.go @@ -0,0 +1,67 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// NnfSystemStorageSpec defines the desired state of NnfSystemStorage +type NnfSystemStorageSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Foo is an example field of NnfSystemStorage. Edit nnfsystemstorage_types.go to remove/update + Foo string `json:"foo,omitempty"` +} + +// NnfSystemStorageStatus defines the observed state of NnfSystemStorage +type NnfSystemStorageStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// NnfSystemStorage is the Schema for the nnfsystemstorages API +type NnfSystemStorage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NnfSystemStorageSpec `json:"spec,omitempty"` + Status NnfSystemStorageStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// NnfSystemStorageList contains a list of NnfSystemStorage +type NnfSystemStorageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfSystemStorage `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NnfSystemStorage{}, &NnfSystemStorageList{}) +} diff --git a/api/v1alpha4/zz_generated.deepcopy.go b/api/v1alpha4/zz_generated.deepcopy.go new file mode 100644 index 00000000..3bc08e49 --- /dev/null +++ b/api/v1alpha4/zz_generated.deepcopy.go @@ -0,0 +1,1274 @@ +//go:build !ignore_autogenerated + +/* + * Copyright 2024 
Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha4 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfAccess) DeepCopyInto(out *NnfAccess) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfAccess. +func (in *NnfAccess) DeepCopy() *NnfAccess { + if in == nil { + return nil + } + out := new(NnfAccess) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfAccess) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NnfAccessList) DeepCopyInto(out *NnfAccessList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfAccess, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfAccessList. +func (in *NnfAccessList) DeepCopy() *NnfAccessList { + if in == nil { + return nil + } + out := new(NnfAccessList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfAccessList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfAccessSpec) DeepCopyInto(out *NnfAccessSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfAccessSpec. +func (in *NnfAccessSpec) DeepCopy() *NnfAccessSpec { + if in == nil { + return nil + } + out := new(NnfAccessSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfAccessStatus) DeepCopyInto(out *NnfAccessStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfAccessStatus. +func (in *NnfAccessStatus) DeepCopy() *NnfAccessStatus { + if in == nil { + return nil + } + out := new(NnfAccessStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NnfContainerProfile) DeepCopyInto(out *NnfContainerProfile) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfContainerProfile. +func (in *NnfContainerProfile) DeepCopy() *NnfContainerProfile { + if in == nil { + return nil + } + out := new(NnfContainerProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfContainerProfile) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfContainerProfileList) DeepCopyInto(out *NnfContainerProfileList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfContainerProfile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfContainerProfileList. +func (in *NnfContainerProfileList) DeepCopy() *NnfContainerProfileList { + if in == nil { + return nil + } + out := new(NnfContainerProfileList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfContainerProfileList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NnfContainerProfileSpec) DeepCopyInto(out *NnfContainerProfileSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfContainerProfileSpec. +func (in *NnfContainerProfileSpec) DeepCopy() *NnfContainerProfileSpec { + if in == nil { + return nil + } + out := new(NnfContainerProfileSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfContainerProfileStatus) DeepCopyInto(out *NnfContainerProfileStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfContainerProfileStatus. +func (in *NnfContainerProfileStatus) DeepCopy() *NnfContainerProfileStatus { + if in == nil { + return nil + } + out := new(NnfContainerProfileStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfDataMovement) DeepCopyInto(out *NnfDataMovement) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovement. +func (in *NnfDataMovement) DeepCopy() *NnfDataMovement { + if in == nil { + return nil + } + out := new(NnfDataMovement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfDataMovement) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NnfDataMovementList) DeepCopyInto(out *NnfDataMovementList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfDataMovement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementList. +func (in *NnfDataMovementList) DeepCopy() *NnfDataMovementList { + if in == nil { + return nil + } + out := new(NnfDataMovementList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfDataMovementList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfDataMovementManager) DeepCopyInto(out *NnfDataMovementManager) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementManager. +func (in *NnfDataMovementManager) DeepCopy() *NnfDataMovementManager { + if in == nil { + return nil + } + out := new(NnfDataMovementManager) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfDataMovementManager) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NnfDataMovementManagerList) DeepCopyInto(out *NnfDataMovementManagerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfDataMovementManager, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementManagerList. +func (in *NnfDataMovementManagerList) DeepCopy() *NnfDataMovementManagerList { + if in == nil { + return nil + } + out := new(NnfDataMovementManagerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfDataMovementManagerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfDataMovementManagerSpec) DeepCopyInto(out *NnfDataMovementManagerSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementManagerSpec. +func (in *NnfDataMovementManagerSpec) DeepCopy() *NnfDataMovementManagerSpec { + if in == nil { + return nil + } + out := new(NnfDataMovementManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfDataMovementManagerStatus) DeepCopyInto(out *NnfDataMovementManagerStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementManagerStatus. 
+func (in *NnfDataMovementManagerStatus) DeepCopy() *NnfDataMovementManagerStatus { + if in == nil { + return nil + } + out := new(NnfDataMovementManagerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfDataMovementProfile) DeepCopyInto(out *NnfDataMovementProfile) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementProfile. +func (in *NnfDataMovementProfile) DeepCopy() *NnfDataMovementProfile { + if in == nil { + return nil + } + out := new(NnfDataMovementProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfDataMovementProfile) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfDataMovementProfileList) DeepCopyInto(out *NnfDataMovementProfileList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfDataMovementProfile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementProfileList. +func (in *NnfDataMovementProfileList) DeepCopy() *NnfDataMovementProfileList { + if in == nil { + return nil + } + out := new(NnfDataMovementProfileList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *NnfDataMovementProfileList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfDataMovementProfileSpec) DeepCopyInto(out *NnfDataMovementProfileSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementProfileSpec. +func (in *NnfDataMovementProfileSpec) DeepCopy() *NnfDataMovementProfileSpec { + if in == nil { + return nil + } + out := new(NnfDataMovementProfileSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfDataMovementProfileStatus) DeepCopyInto(out *NnfDataMovementProfileStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementProfileStatus. +func (in *NnfDataMovementProfileStatus) DeepCopy() *NnfDataMovementProfileStatus { + if in == nil { + return nil + } + out := new(NnfDataMovementProfileStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfDataMovementSpec) DeepCopyInto(out *NnfDataMovementSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementSpec. +func (in *NnfDataMovementSpec) DeepCopy() *NnfDataMovementSpec { + if in == nil { + return nil + } + out := new(NnfDataMovementSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NnfDataMovementStatus) DeepCopyInto(out *NnfDataMovementStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementStatus. +func (in *NnfDataMovementStatus) DeepCopy() *NnfDataMovementStatus { + if in == nil { + return nil + } + out := new(NnfDataMovementStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfLustreMGT) DeepCopyInto(out *NnfLustreMGT) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfLustreMGT. +func (in *NnfLustreMGT) DeepCopy() *NnfLustreMGT { + if in == nil { + return nil + } + out := new(NnfLustreMGT) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfLustreMGT) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfLustreMGTList) DeepCopyInto(out *NnfLustreMGTList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfLustreMGT, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfLustreMGTList. 
+func (in *NnfLustreMGTList) DeepCopy() *NnfLustreMGTList { + if in == nil { + return nil + } + out := new(NnfLustreMGTList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfLustreMGTList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfLustreMGTSpec) DeepCopyInto(out *NnfLustreMGTSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfLustreMGTSpec. +func (in *NnfLustreMGTSpec) DeepCopy() *NnfLustreMGTSpec { + if in == nil { + return nil + } + out := new(NnfLustreMGTSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfLustreMGTStatus) DeepCopyInto(out *NnfLustreMGTStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfLustreMGTStatus. +func (in *NnfLustreMGTStatus) DeepCopy() *NnfLustreMGTStatus { + if in == nil { + return nil + } + out := new(NnfLustreMGTStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNode) DeepCopyInto(out *NnfNode) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNode. 
+func (in *NnfNode) DeepCopy() *NnfNode { + if in == nil { + return nil + } + out := new(NnfNode) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfNode) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeBlockStorage) DeepCopyInto(out *NnfNodeBlockStorage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorage. +func (in *NnfNodeBlockStorage) DeepCopy() *NnfNodeBlockStorage { + if in == nil { + return nil + } + out := new(NnfNodeBlockStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfNodeBlockStorage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeBlockStorageList) DeepCopyInto(out *NnfNodeBlockStorageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfNodeBlockStorage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageList. 
+func (in *NnfNodeBlockStorageList) DeepCopy() *NnfNodeBlockStorageList { + if in == nil { + return nil + } + out := new(NnfNodeBlockStorageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfNodeBlockStorageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeBlockStorageSpec) DeepCopyInto(out *NnfNodeBlockStorageSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageSpec. +func (in *NnfNodeBlockStorageSpec) DeepCopy() *NnfNodeBlockStorageSpec { + if in == nil { + return nil + } + out := new(NnfNodeBlockStorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeBlockStorageStatus) DeepCopyInto(out *NnfNodeBlockStorageStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageStatus. +func (in *NnfNodeBlockStorageStatus) DeepCopy() *NnfNodeBlockStorageStatus { + if in == nil { + return nil + } + out := new(NnfNodeBlockStorageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeECData) DeepCopyInto(out *NnfNodeECData) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeECData. 
+func (in *NnfNodeECData) DeepCopy() *NnfNodeECData { + if in == nil { + return nil + } + out := new(NnfNodeECData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfNodeECData) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeECDataList) DeepCopyInto(out *NnfNodeECDataList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfNodeECData, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeECDataList. +func (in *NnfNodeECDataList) DeepCopy() *NnfNodeECDataList { + if in == nil { + return nil + } + out := new(NnfNodeECDataList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfNodeECDataList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeECDataSpec) DeepCopyInto(out *NnfNodeECDataSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeECDataSpec. +func (in *NnfNodeECDataSpec) DeepCopy() *NnfNodeECDataSpec { + if in == nil { + return nil + } + out := new(NnfNodeECDataSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NnfNodeECDataStatus) DeepCopyInto(out *NnfNodeECDataStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeECDataStatus. +func (in *NnfNodeECDataStatus) DeepCopy() *NnfNodeECDataStatus { + if in == nil { + return nil + } + out := new(NnfNodeECDataStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeList) DeepCopyInto(out *NnfNodeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfNode, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeList. +func (in *NnfNodeList) DeepCopy() *NnfNodeList { + if in == nil { + return nil + } + out := new(NnfNodeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfNodeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeSpec) DeepCopyInto(out *NnfNodeSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeSpec. +func (in *NnfNodeSpec) DeepCopy() *NnfNodeSpec { + if in == nil { + return nil + } + out := new(NnfNodeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NnfNodeStatus) DeepCopyInto(out *NnfNodeStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeStatus. +func (in *NnfNodeStatus) DeepCopy() *NnfNodeStatus { + if in == nil { + return nil + } + out := new(NnfNodeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeStorage) DeepCopyInto(out *NnfNodeStorage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeStorage. +func (in *NnfNodeStorage) DeepCopy() *NnfNodeStorage { + if in == nil { + return nil + } + out := new(NnfNodeStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfNodeStorage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeStorageList) DeepCopyInto(out *NnfNodeStorageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfNodeStorage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeStorageList. +func (in *NnfNodeStorageList) DeepCopy() *NnfNodeStorageList { + if in == nil { + return nil + } + out := new(NnfNodeStorageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *NnfNodeStorageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeStorageSpec) DeepCopyInto(out *NnfNodeStorageSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeStorageSpec. +func (in *NnfNodeStorageSpec) DeepCopy() *NnfNodeStorageSpec { + if in == nil { + return nil + } + out := new(NnfNodeStorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeStorageStatus) DeepCopyInto(out *NnfNodeStorageStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeStorageStatus. +func (in *NnfNodeStorageStatus) DeepCopy() *NnfNodeStorageStatus { + if in == nil { + return nil + } + out := new(NnfNodeStorageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfPortManager) DeepCopyInto(out *NnfPortManager) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManager. +func (in *NnfPortManager) DeepCopy() *NnfPortManager { + if in == nil { + return nil + } + out := new(NnfPortManager) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *NnfPortManager) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfPortManagerList) DeepCopyInto(out *NnfPortManagerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfPortManager, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManagerList. +func (in *NnfPortManagerList) DeepCopy() *NnfPortManagerList { + if in == nil { + return nil + } + out := new(NnfPortManagerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfPortManagerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfPortManagerSpec) DeepCopyInto(out *NnfPortManagerSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManagerSpec. +func (in *NnfPortManagerSpec) DeepCopy() *NnfPortManagerSpec { + if in == nil { + return nil + } + out := new(NnfPortManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfPortManagerStatus) DeepCopyInto(out *NnfPortManagerStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManagerStatus. 
+func (in *NnfPortManagerStatus) DeepCopy() *NnfPortManagerStatus { + if in == nil { + return nil + } + out := new(NnfPortManagerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorage) DeepCopyInto(out *NnfStorage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorage. +func (in *NnfStorage) DeepCopy() *NnfStorage { + if in == nil { + return nil + } + out := new(NnfStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfStorage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageList) DeepCopyInto(out *NnfStorageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfStorage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageList. +func (in *NnfStorageList) DeepCopy() *NnfStorageList { + if in == nil { + return nil + } + out := new(NnfStorageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *NnfStorageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfile) DeepCopyInto(out *NnfStorageProfile) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfile. +func (in *NnfStorageProfile) DeepCopy() *NnfStorageProfile { + if in == nil { + return nil + } + out := new(NnfStorageProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfStorageProfile) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfileList) DeepCopyInto(out *NnfStorageProfileList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfStorageProfile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileList. +func (in *NnfStorageProfileList) DeepCopy() *NnfStorageProfileList { + if in == nil { + return nil + } + out := new(NnfStorageProfileList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *NnfStorageProfileList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfileSpec) DeepCopyInto(out *NnfStorageProfileSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileSpec. +func (in *NnfStorageProfileSpec) DeepCopy() *NnfStorageProfileSpec { + if in == nil { + return nil + } + out := new(NnfStorageProfileSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfileStatus) DeepCopyInto(out *NnfStorageProfileStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileStatus. +func (in *NnfStorageProfileStatus) DeepCopy() *NnfStorageProfileStatus { + if in == nil { + return nil + } + out := new(NnfStorageProfileStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageSpec) DeepCopyInto(out *NnfStorageSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageSpec. +func (in *NnfStorageSpec) DeepCopy() *NnfStorageSpec { + if in == nil { + return nil + } + out := new(NnfStorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageStatus) DeepCopyInto(out *NnfStorageStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageStatus. 
+func (in *NnfStorageStatus) DeepCopy() *NnfStorageStatus { + if in == nil { + return nil + } + out := new(NnfStorageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfSystemStorage) DeepCopyInto(out *NnfSystemStorage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfSystemStorage. +func (in *NnfSystemStorage) DeepCopy() *NnfSystemStorage { + if in == nil { + return nil + } + out := new(NnfSystemStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfSystemStorage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfSystemStorageList) DeepCopyInto(out *NnfSystemStorageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfSystemStorage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfSystemStorageList. +func (in *NnfSystemStorageList) DeepCopy() *NnfSystemStorageList { + if in == nil { + return nil + } + out := new(NnfSystemStorageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *NnfSystemStorageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfSystemStorageSpec) DeepCopyInto(out *NnfSystemStorageSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfSystemStorageSpec. +func (in *NnfSystemStorageSpec) DeepCopy() *NnfSystemStorageSpec { + if in == nil { + return nil + } + out := new(NnfSystemStorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfSystemStorageStatus) DeepCopyInto(out *NnfSystemStorageStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfSystemStorageStatus. +func (in *NnfSystemStorageStatus) DeepCopy() *NnfSystemStorageStatus { + if in == nil { + return nil + } + out := new(NnfSystemStorageStatus) + in.DeepCopyInto(out) + return out +} diff --git a/cmd/main.go b/cmd/main.go index 0c6103e9..eb776e1d 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -57,6 +57,7 @@ import ( nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" //+kubebuilder:scaffold:imports nnf "github.com/NearNodeFlash/nnf-ec/pkg" @@ -82,6 +83,7 @@ func init() { utilruntime.Must(mpiv2beta1.AddToScheme(scheme)) utilruntime.Must(nnfv1alpha2.AddToScheme(scheme)) utilruntime.Must(nnfv1alpha3.AddToScheme(scheme)) + utilruntime.Must(nnfv1alpha4.AddToScheme(scheme)) //+kubebuilder:scaffold:scheme } diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 8996660e..5b4323b2 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ 
-40,4 +40,18 @@ resources: - nnf_v1alpha3_nnfstorage.yaml - nnf_v1alpha3_nnfstorageprofile.yaml - nnf_v1alpha3_nnfsystemstorage.yaml +- nnf_v1alpha4_nnfaccess.yaml +- nnf_v1alpha4_nnfcontainerprofile.yaml +- nnf_v1alpha4_nnfdatamovement.yaml +- nnf_v1alpha4_nnfdatamovementmanager.yaml +- nnf_v1alpha4_nnfdatamovementprofile.yaml +- nnf_v1alpha4_nnflustremgt.yaml +- nnf_v1alpha4_nnfnode.yaml +- nnf_v1alpha4_nnfnodeblockstorage.yaml +- nnf_v1alpha4_nnfnodeecdata.yaml +- nnf_v1alpha4_nnfnodestorage.yaml +- nnf_v1alpha4_nnfportmanager.yaml +- nnf_v1alpha4_nnfstorage.yaml +- nnf_v1alpha4_nnfstorageprofile.yaml +- nnf_v1alpha4_nnfsystemstorage.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/samples/nnf_v1alpha4_nnfaccess.yaml b/config/samples/nnf_v1alpha4_nnfaccess.yaml new file mode 100644 index 00000000..4e88f902 --- /dev/null +++ b/config/samples/nnf_v1alpha4_nnfaccess.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha4 +kind: NnfAccess +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfaccess-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/nnf_v1alpha4_nnfcontainerprofile.yaml b/config/samples/nnf_v1alpha4_nnfcontainerprofile.yaml new file mode 100644 index 00000000..d1b26eb3 --- /dev/null +++ b/config/samples/nnf_v1alpha4_nnfcontainerprofile.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha4 +kind: NnfContainerProfile +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfcontainerprofile-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/nnf_v1alpha4_nnfdatamovement.yaml b/config/samples/nnf_v1alpha4_nnfdatamovement.yaml new file mode 100644 index 00000000..12dc293b --- /dev/null +++ b/config/samples/nnf_v1alpha4_nnfdatamovement.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha4 +kind: NnfDataMovement +metadata: + labels: + app.kubernetes.io/name: nnf-sos + 
app.kubernetes.io/managed-by: kustomize + name: nnfdatamovement-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/nnf_v1alpha4_nnfdatamovementmanager.yaml b/config/samples/nnf_v1alpha4_nnfdatamovementmanager.yaml new file mode 100644 index 00000000..4928282b --- /dev/null +++ b/config/samples/nnf_v1alpha4_nnfdatamovementmanager.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha4 +kind: NnfDataMovementManager +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfdatamovementmanager-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/nnf_v1alpha4_nnfdatamovementprofile.yaml b/config/samples/nnf_v1alpha4_nnfdatamovementprofile.yaml new file mode 100644 index 00000000..11c24ce7 --- /dev/null +++ b/config/samples/nnf_v1alpha4_nnfdatamovementprofile.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha4 +kind: NnfDataMovementProfile +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfdatamovementprofile-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/nnf_v1alpha4_nnflustremgt.yaml b/config/samples/nnf_v1alpha4_nnflustremgt.yaml new file mode 100644 index 00000000..6921380d --- /dev/null +++ b/config/samples/nnf_v1alpha4_nnflustremgt.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha4 +kind: NnfLustreMGT +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnflustremgt-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/nnf_v1alpha4_nnfnode.yaml b/config/samples/nnf_v1alpha4_nnfnode.yaml new file mode 100644 index 00000000..340b5a03 --- /dev/null +++ b/config/samples/nnf_v1alpha4_nnfnode.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha4 +kind: NnfNode +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfnode-sample +spec: + # 
TODO(user): Add fields here diff --git a/config/samples/nnf_v1alpha4_nnfnodeblockstorage.yaml b/config/samples/nnf_v1alpha4_nnfnodeblockstorage.yaml new file mode 100644 index 00000000..65b29835 --- /dev/null +++ b/config/samples/nnf_v1alpha4_nnfnodeblockstorage.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha4 +kind: NnfNodeBlockStorage +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfnodeblockstorage-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/nnf_v1alpha4_nnfnodeecdata.yaml b/config/samples/nnf_v1alpha4_nnfnodeecdata.yaml new file mode 100644 index 00000000..efaac815 --- /dev/null +++ b/config/samples/nnf_v1alpha4_nnfnodeecdata.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha4 +kind: NnfNodeECData +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfnodeecdata-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/nnf_v1alpha4_nnfnodestorage.yaml b/config/samples/nnf_v1alpha4_nnfnodestorage.yaml new file mode 100644 index 00000000..6ff9f50b --- /dev/null +++ b/config/samples/nnf_v1alpha4_nnfnodestorage.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha4 +kind: NnfNodeStorage +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfnodestorage-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/nnf_v1alpha4_nnfportmanager.yaml b/config/samples/nnf_v1alpha4_nnfportmanager.yaml new file mode 100644 index 00000000..d7c8c653 --- /dev/null +++ b/config/samples/nnf_v1alpha4_nnfportmanager.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha4 +kind: NnfPortManager +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfportmanager-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/nnf_v1alpha4_nnfstorage.yaml 
b/config/samples/nnf_v1alpha4_nnfstorage.yaml new file mode 100644 index 00000000..606d977c --- /dev/null +++ b/config/samples/nnf_v1alpha4_nnfstorage.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha4 +kind: NnfStorage +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfstorage-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/nnf_v1alpha4_nnfstorageprofile.yaml b/config/samples/nnf_v1alpha4_nnfstorageprofile.yaml new file mode 100644 index 00000000..629ec9ae --- /dev/null +++ b/config/samples/nnf_v1alpha4_nnfstorageprofile.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha4 +kind: NnfStorageProfile +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfstorageprofile-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/nnf_v1alpha4_nnfsystemstorage.yaml b/config/samples/nnf_v1alpha4_nnfsystemstorage.yaml new file mode 100644 index 00000000..7abae5d2 --- /dev/null +++ b/config/samples/nnf_v1alpha4_nnfsystemstorage.yaml @@ -0,0 +1,9 @@ +apiVersion: nnf.cray.hpe.com/v1alpha4 +kind: NnfSystemStorage +metadata: + labels: + app.kubernetes.io/name: nnf-sos + app.kubernetes.io/managed-by: kustomize + name: nnfsystemstorage-sample +spec: + # TODO(user): Add fields here From 81887d748467cff71dde9bb8268a75adf403c92b Mon Sep 17 00:00:00 2001 From: Blake Devcich Date: Wed, 13 Nov 2024 15:45:19 -0600 Subject: [PATCH 02/23] CRDBUMPER-copy-api-content Copy API content from v1alpha3 to v1alpha4. Move the kubebuilder:storageversion marker from v1alpha3 to v1alpha4. Set localSchemeBuilder var in api/v1alpha3/groupversion_info.go to satisfy zz_generated.conversion.go. 
Signed-off-by: Blake Devcich --- api/v1alpha3/groupversion_info.go | 3 + api/v1alpha3/nnfaccess_types.go | 1 - api/v1alpha3/nnfcontainerprofile_types.go | 2 - api/v1alpha3/nnfdatamovement_types.go | 1 - api/v1alpha3/nnfdatamovementmanager_types.go | 1 - api/v1alpha3/nnfdatamovementprofile_types.go | 2 - api/v1alpha3/nnflustremgt_types.go | 1 - api/v1alpha3/nnfnode_types.go | 1 - api/v1alpha3/nnfnodeblockstorage_types.go | 1 - api/v1alpha3/nnfnodeecdata_types.go | 1 - api/v1alpha3/nnfnodestorage_types.go | 1 - api/v1alpha3/nnfportmanager_types.go | 1 - api/v1alpha3/nnfstorage_types.go | 1 - api/v1alpha3/nnfstorageprofile_types.go | 2 - api/v1alpha3/nnfsystemstorage_types.go | 1 - api/v1alpha4/nnf_resource_condition_types.go | 115 +++++++ api/v1alpha4/nnf_resource_health_type.go | 68 ++++ api/v1alpha4/nnf_resource_state_type.go | 48 +++ api/v1alpha4/nnf_resource_status_type.go | 152 +++++++++ api/v1alpha4/nnf_resource_type.go | 33 ++ api/v1alpha4/nnfaccess_types.go | 90 +++++- .../nnfaccess_webhook.go | 2 +- .../nnfaccess_webhook_test.go | 2 +- api/v1alpha4/nnfcontainerprofile_types.go | 104 +++++- .../nnfcontainerprofile_webhook.go | 2 +- .../nnfcontainerprofile_webhook_test.go | 2 +- api/v1alpha4/nnfdatamovement_types.go | 242 +++++++++++++- .../nnfdatamovement_webhook.go | 2 +- .../nnfdatamovement_webhook_test.go | 2 +- api/v1alpha4/nnfdatamovementmanager_types.go | 51 ++- .../nnfdatamovementmanager_webhook.go | 2 +- .../nnfdatamovementmanager_webhook_test.go | 2 +- api/v1alpha4/nnfdatamovementprofile_types.go | 89 +++++- .../nnfdatamovementprofile_webhook.go | 2 +- .../nnfdatamovementprofile_webhook_test.go | 2 +- api/v1alpha4/nnflustremgt_types.go | 67 +++- .../nnflustremgt_webhook.go | 2 +- .../nnflustremgt_webhook_test.go | 2 +- api/v1alpha4/nnfnode_types.go | 89 +++++- api/{v1alpha3 => v1alpha4}/nnfnode_webhook.go | 2 +- .../nnfnode_webhook_test.go | 2 +- api/v1alpha4/nnfnodeblockstorage_types.go | 98 +++++- .../nnfnodeblockstorage_webhook.go | 2 +- 
.../nnfnodeblockstorage_webhook_test.go | 2 +- api/v1alpha4/nnfnodeecdata_types.go | 16 +- .../nnfnodeecdata_webhook.go | 2 +- .../nnfnodeecdata_webhook_test.go | 2 +- api/v1alpha4/nnfnodestorage_types.go | 115 ++++++- .../nnfnodestorage_webhook.go | 2 +- .../nnfnodestorage_webhook_test.go | 2 +- api/v1alpha4/nnfportmanager_types.go | 87 ++++- .../nnfportmanager_webhook.go | 2 +- .../nnfportmanager_webhook_test.go | 2 +- api/v1alpha4/nnfstorage_types.go | 148 ++++++++- .../nnfstorage_webhook.go | 2 +- .../nnfstorage_webhook_test.go | 2 +- api/v1alpha4/nnfstorageprofile_types.go | 296 ++++++++++++++++-- .../nnfstorageprofile_webhook.go | 2 +- .../nnfstorageprofile_webhook_test.go | 2 +- api/v1alpha4/nnfsystemstorage_types.go | 95 +++++- .../nnfsystemstorage_webhook.go | 2 +- .../nnfsystemstorage_webhook_test.go | 2 +- .../webhook_suite_test.go | 2 +- api/v1alpha4/workflow_helpers.go | 73 +++++ 64 files changed, 1938 insertions(+), 216 deletions(-) create mode 100644 api/v1alpha4/nnf_resource_condition_types.go create mode 100644 api/v1alpha4/nnf_resource_health_type.go create mode 100644 api/v1alpha4/nnf_resource_state_type.go create mode 100644 api/v1alpha4/nnf_resource_status_type.go create mode 100644 api/v1alpha4/nnf_resource_type.go rename api/{v1alpha3 => v1alpha4}/nnfaccess_webhook.go (98%) rename api/{v1alpha3 => v1alpha4}/nnfaccess_webhook_test.go (98%) rename api/{v1alpha3 => v1alpha4}/nnfcontainerprofile_webhook.go (99%) rename api/{v1alpha3 => v1alpha4}/nnfcontainerprofile_webhook_test.go (99%) rename api/{v1alpha3 => v1alpha4}/nnfdatamovement_webhook.go (98%) rename api/{v1alpha3 => v1alpha4}/nnfdatamovement_webhook_test.go (98%) rename api/{v1alpha3 => v1alpha4}/nnfdatamovementmanager_webhook.go (98%) rename api/{v1alpha3 => v1alpha4}/nnfdatamovementmanager_webhook_test.go (98%) rename api/{v1alpha3 => v1alpha4}/nnfdatamovementprofile_webhook.go (99%) rename api/{v1alpha3 => v1alpha4}/nnfdatamovementprofile_webhook_test.go (99%) rename api/{v1alpha3 => 
v1alpha4}/nnflustremgt_webhook.go (98%) rename api/{v1alpha3 => v1alpha4}/nnflustremgt_webhook_test.go (98%) rename api/{v1alpha3 => v1alpha4}/nnfnode_webhook.go (98%) rename api/{v1alpha3 => v1alpha4}/nnfnode_webhook_test.go (98%) rename api/{v1alpha3 => v1alpha4}/nnfnodeblockstorage_webhook.go (98%) rename api/{v1alpha3 => v1alpha4}/nnfnodeblockstorage_webhook_test.go (98%) rename api/{v1alpha3 => v1alpha4}/nnfnodeecdata_webhook.go (98%) rename api/{v1alpha3 => v1alpha4}/nnfnodeecdata_webhook_test.go (98%) rename api/{v1alpha3 => v1alpha4}/nnfnodestorage_webhook.go (98%) rename api/{v1alpha3 => v1alpha4}/nnfnodestorage_webhook_test.go (98%) rename api/{v1alpha3 => v1alpha4}/nnfportmanager_webhook.go (98%) rename api/{v1alpha3 => v1alpha4}/nnfportmanager_webhook_test.go (98%) rename api/{v1alpha3 => v1alpha4}/nnfstorage_webhook.go (98%) rename api/{v1alpha3 => v1alpha4}/nnfstorage_webhook_test.go (98%) rename api/{v1alpha3 => v1alpha4}/nnfstorageprofile_webhook.go (99%) rename api/{v1alpha3 => v1alpha4}/nnfstorageprofile_webhook_test.go (99%) rename api/{v1alpha3 => v1alpha4}/nnfsystemstorage_webhook.go (98%) rename api/{v1alpha3 => v1alpha4}/nnfsystemstorage_webhook_test.go (98%) rename api/{v1alpha3 => v1alpha4}/webhook_suite_test.go (99%) create mode 100644 api/v1alpha4/workflow_helpers.go diff --git a/api/v1alpha3/groupversion_info.go b/api/v1alpha3/groupversion_info.go index 858a308e..ef51561a 100644 --- a/api/v1alpha3/groupversion_info.go +++ b/api/v1alpha3/groupversion_info.go @@ -36,4 +36,7 @@ var ( // AddToScheme adds the types in this group-version to the given scheme. AddToScheme = SchemeBuilder.AddToScheme + + // Used by zz_generated.conversion.go. 
+ localSchemeBuilder = SchemeBuilder.SchemeBuilder ) diff --git a/api/v1alpha3/nnfaccess_types.go b/api/v1alpha3/nnfaccess_types.go index dcf9b38d..90377d29 100644 --- a/api/v1alpha3/nnfaccess_types.go +++ b/api/v1alpha3/nnfaccess_types.go @@ -87,7 +87,6 @@ type NnfAccessStatus struct { //+kubebuilder:object:root=true //+kubebuilder:subresource:status -// +kubebuilder:storageversion //+kubebuilder:printcolumn:name="DESIREDSTATE",type="string",JSONPath=".spec.desiredState",description="The desired state" //+kubebuilder:printcolumn:name="STATE",type="string",JSONPath=".status.state",description="The current state" //+kubebuilder:printcolumn:name="READY",type="boolean",JSONPath=".status.ready",description="Whether the state has been achieved" diff --git a/api/v1alpha3/nnfcontainerprofile_types.go b/api/v1alpha3/nnfcontainerprofile_types.go index 1376b2c7..ac09f996 100644 --- a/api/v1alpha3/nnfcontainerprofile_types.go +++ b/api/v1alpha3/nnfcontainerprofile_types.go @@ -116,7 +116,6 @@ type NnfContainerProfileStorage struct { } // +kubebuilder:object:root=true -// +kubebuilder:storageversion // NnfContainerProfile is the Schema for the nnfcontainerprofiles API type NnfContainerProfile struct { @@ -127,7 +126,6 @@ type NnfContainerProfile struct { } // +kubebuilder:object:root=true -// +kubebuilder:storageversion // NnfContainerProfileList contains a list of NnfContainerProfile type NnfContainerProfileList struct { diff --git a/api/v1alpha3/nnfdatamovement_types.go b/api/v1alpha3/nnfdatamovement_types.go index 0c5342ce..04956d4b 100644 --- a/api/v1alpha3/nnfdatamovement_types.go +++ b/api/v1alpha3/nnfdatamovement_types.go @@ -219,7 +219,6 @@ const ( //+kubebuilder:object:root=true //+kubebuilder:subresource:status -// +kubebuilder:storageversion //+kubebuilder:printcolumn:name="STATE",type="string",JSONPath=".status.state",description="Current state" //+kubebuilder:printcolumn:name="STATUS",type="string",JSONPath=".status.status",description="Status of current state" 
//+kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" diff --git a/api/v1alpha3/nnfdatamovementmanager_types.go b/api/v1alpha3/nnfdatamovementmanager_types.go index b147f019..5580ab27 100644 --- a/api/v1alpha3/nnfdatamovementmanager_types.go +++ b/api/v1alpha3/nnfdatamovementmanager_types.go @@ -75,7 +75,6 @@ type NnfDataMovementManagerStatus struct { //+kubebuilder:object:root=true //+kubebuilder:subresource:status -// +kubebuilder:storageversion //+kubebuilder:printcolumn:name="READY",type="boolean",JSONPath=".status.ready",description="True if manager readied all resoures" //+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" diff --git a/api/v1alpha3/nnfdatamovementprofile_types.go b/api/v1alpha3/nnfdatamovementprofile_types.go index 27b0dfd1..37d0c7d6 100644 --- a/api/v1alpha3/nnfdatamovementprofile_types.go +++ b/api/v1alpha3/nnfdatamovementprofile_types.go @@ -97,7 +97,6 @@ type NnfDataMovementProfileData struct { } // +kubebuilder:object:root=true -// +kubebuilder:storageversion // +kubebuilder:printcolumn:name="DEFAULT",type="boolean",JSONPath=".data.default",description="True if this is the default instance" // +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" @@ -110,7 +109,6 @@ type NnfDataMovementProfile struct { } // +kubebuilder:object:root=true -// +kubebuilder:storageversion // NnfDataMovementProfileList contains a list of NnfDataMovementProfile type NnfDataMovementProfileList struct { diff --git a/api/v1alpha3/nnflustremgt_types.go b/api/v1alpha3/nnflustremgt_types.go index 98fc3141..dfb09377 100644 --- a/api/v1alpha3/nnflustremgt_types.go +++ b/api/v1alpha3/nnflustremgt_types.go @@ -70,7 +70,6 @@ type NnfLustreMGTStatusClaim struct { // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:storageversion // NnfLustreMGT is the Schema for the nnfstorageprofiles API type NnfLustreMGT struct { metav1.TypeMeta 
`json:",inline"` diff --git a/api/v1alpha3/nnfnode_types.go b/api/v1alpha3/nnfnode_types.go index 9feb06f9..0f4d615a 100644 --- a/api/v1alpha3/nnfnode_types.go +++ b/api/v1alpha3/nnfnode_types.go @@ -98,7 +98,6 @@ type NnfDriveStatus struct { //+kubebuilder:object:root=true //+kubebuilder:subresource:status -// +kubebuilder:storageversion //+kubebuilder:printcolumn:name="STATE",type="string",JSONPath=".spec.state",description="Current desired state" //+kubebuilder:printcolumn:name="HEALTH",type="string",JSONPath=".status.health",description="Health of node" //+kubebuilder:printcolumn:name="STATUS",type="string",JSONPath=".status.status",description="Current status of node" diff --git a/api/v1alpha3/nnfnodeblockstorage_types.go b/api/v1alpha3/nnfnodeblockstorage_types.go index 41806b57..73e66a67 100644 --- a/api/v1alpha3/nnfnodeblockstorage_types.go +++ b/api/v1alpha3/nnfnodeblockstorage_types.go @@ -97,7 +97,6 @@ type NnfNodeBlockStorageAllocationStatus struct { // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:storageversion // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.ready" // +kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" // +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" diff --git a/api/v1alpha3/nnfnodeecdata_types.go b/api/v1alpha3/nnfnodeecdata_types.go index 823b90c3..fc808059 100644 --- a/api/v1alpha3/nnfnodeecdata_types.go +++ b/api/v1alpha3/nnfnodeecdata_types.go @@ -44,7 +44,6 @@ type NnfNodeECPrivateData map[string]string //+kubebuilder:object:root=true //+kubebuilder:subresource:status -// +kubebuilder:storageversion // NnfNodeECData is the Schema for the nnfnodeecdata API type NnfNodeECData struct { diff --git a/api/v1alpha3/nnfnodestorage_types.go b/api/v1alpha3/nnfnodestorage_types.go index a977b677..c90c746e 100644 --- a/api/v1alpha3/nnfnodestorage_types.go +++ b/api/v1alpha3/nnfnodestorage_types.go @@ 
-113,7 +113,6 @@ type NnfNodeStorageAllocationStatus struct { // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:storageversion // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.ready" // +kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" // +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" diff --git a/api/v1alpha3/nnfportmanager_types.go b/api/v1alpha3/nnfportmanager_types.go index dd75dde3..fb180521 100644 --- a/api/v1alpha3/nnfportmanager_types.go +++ b/api/v1alpha3/nnfportmanager_types.go @@ -113,7 +113,6 @@ type NnfPortManagerStatus struct { //+kubebuilder:object:root=true //+kubebuilder:subresource:status -// +kubebuilder:storageversion // NnfPortManager is the Schema for the nnfportmanagers API type NnfPortManager struct { diff --git a/api/v1alpha3/nnfstorage_types.go b/api/v1alpha3/nnfstorage_types.go index 841e2e9a..45b7a176 100644 --- a/api/v1alpha3/nnfstorage_types.go +++ b/api/v1alpha3/nnfstorage_types.go @@ -141,7 +141,6 @@ type NnfStorageStatus struct { //+kubebuilder:object:root=true //+kubebuilder:subresource:status -// +kubebuilder:storageversion //+kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.ready" //+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" //+kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" diff --git a/api/v1alpha3/nnfstorageprofile_types.go b/api/v1alpha3/nnfstorageprofile_types.go index 8b5b3e98..3560b5fa 100644 --- a/api/v1alpha3/nnfstorageprofile_types.go +++ b/api/v1alpha3/nnfstorageprofile_types.go @@ -275,7 +275,6 @@ type NnfStorageProfileData struct { } //+kubebuilder:object:root=true -// +kubebuilder:storageversion //+kubebuilder:printcolumn:name="DEFAULT",type="boolean",JSONPath=".data.default",description="True if this is the default instance" 
//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" @@ -288,7 +287,6 @@ type NnfStorageProfile struct { } //+kubebuilder:object:root=true -// +kubebuilder:storageversion // NnfStorageProfileList contains a list of NnfStorageProfile type NnfStorageProfileList struct { diff --git a/api/v1alpha3/nnfsystemstorage_types.go b/api/v1alpha3/nnfsystemstorage_types.go index cdb0628a..bfe0a7fb 100644 --- a/api/v1alpha3/nnfsystemstorage_types.go +++ b/api/v1alpha3/nnfsystemstorage_types.go @@ -105,7 +105,6 @@ type NnfSystemStorageStatus struct { // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:storageversion // NnfSystemStorage is the Schema for the nnfsystemstorages API type NnfSystemStorage struct { metav1.TypeMeta `json:",inline"` diff --git a/api/v1alpha4/nnf_resource_condition_types.go b/api/v1alpha4/nnf_resource_condition_types.go new file mode 100644 index 00000000..8f7adba2 --- /dev/null +++ b/api/v1alpha4/nnf_resource_condition_types.go @@ -0,0 +1,115 @@ +/* + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Types define the condition type that is recorded by the system. 
Each storage resource +// defines an array of conditions as state transitions. Entry into and out of the state +// is recorded by the metav1.ConditionStatus. Order must be preserved and consistent between +// the Index and string values. +const ( + ConditionIndexCreateStoragePool = iota + ConditionIndexDeleteStoragePool + ConditionIndexCreateStorageGroup + ConditionIndexCreateFileSystem + ConditionIndexCreateFileShare + ConditionIndexGetResource + ConditionIndexInvalidResource + // INSERT NEW ITEMS HERE - Ensure Condition string is at same index + + numConditions + + ConditionCreateStoragePool = "CreateStoragePool" + ConditionDeleteStoragePool = "DeleteStoragePool" + ConditionCreateStorageGroup = "CreateStorageGroup" + ConditionCreateFileSystem = "CreateFileSystem" + ConditionCreateFileShare = "CreateFileShare" + ConditionGetResource = "GetResource" + ConditionInvalidResource = "InvalidResource" + // INSERT NEW ITEMS HERE - Ensure NewConditions() is updated to contain item and correct ordering +) + +// NewConditions generates a new conditions array for NNFNodeStorage +func NewConditions() []metav1.Condition { + + types := []string{ + ConditionCreateStoragePool, + ConditionDeleteStoragePool, + ConditionCreateStorageGroup, + ConditionCreateFileSystem, + ConditionCreateFileShare, + ConditionGetResource, + ConditionInvalidResource, + } + + if numConditions != len(types) { + panic("Did you forget to include the condition in the types array?") + } + + c := make([]metav1.Condition, len(types)) + for idx := range c { + c[idx] = metav1.Condition{ + Type: types[idx], + Status: metav1.ConditionUnknown, + Reason: ConditionUnknown, + LastTransitionTime: metav1.Now(), + } + } + + c[ConditionIndexCreateStoragePool].Status = metav1.ConditionTrue + c[ConditionIndexCreateStoragePool].LastTransitionTime = metav1.Now() + + return c + +} + +// SetGetResourceFailureCondition sets/gets the specified condition to failed +func SetGetResourceFailureCondition(c []metav1.Condition, err error) 
{ + c[ConditionIndexGetResource] = metav1.Condition{ + Type: ConditionGetResource, + Reason: ConditionFailed, + Status: metav1.ConditionTrue, + Message: err.Error(), + LastTransitionTime: metav1.Now(), + } +} + +// SetResourceInvalidCondition sets/gets the specified condition to invalid +func SetResourceInvalidCondition(c []metav1.Condition, err error) { + c[ConditionIndexInvalidResource] = metav1.Condition{ + Type: ConditionInvalidResource, + Reason: ConditionInvalid, + Status: metav1.ConditionTrue, + Message: err.Error(), + LastTransitionTime: metav1.Now(), + } +} + +// Reason implements the Reason field of a metav1.Condition. In accordance with the metav1.Condition, +// the value should be a CamelCase string and may not be empty. +const ( + ConditionUnknown = "Unknown" + ConditionFailed = "Failed" + ConditionInvalid = "Invalid" + ConditionSuccess = "Success" +) diff --git a/api/v1alpha4/nnf_resource_health_type.go b/api/v1alpha4/nnf_resource_health_type.go new file mode 100644 index 00000000..4e590776 --- /dev/null +++ b/api/v1alpha4/nnf_resource_health_type.go @@ -0,0 +1,68 @@ +/* + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v1alpha4 + +import ( + sf "github.com/NearNodeFlash/nnf-ec/pkg/rfsf/pkg/models" +) + +// NnfResourceHealthType defines the health of an NNF resource. +type NnfResourceHealthType string + +const ( + // ResourceOkay is SF health OK + ResourceOkay NnfResourceHealthType = NnfResourceHealthType(sf.OK_RH) + + // ResourceWarning is SF health WARNING + ResourceWarning = NnfResourceHealthType(sf.WARNING_RH) + + // ResourceCritical is SF health CRITICAL + ResourceCritical = NnfResourceHealthType(sf.CRITICAL_RH) +) + +// ResourceHealth maps a SF ResourceStatus to an NNFResourceHealthType +func ResourceHealth(s sf.ResourceStatus) NnfResourceHealthType { + switch s.Health { + case sf.OK_RH: + return ResourceOkay + case sf.WARNING_RH: + return ResourceWarning + case sf.CRITICAL_RH: + return ResourceCritical + } + + panic("Unknown Resource Health " + string(s.Health)) +} + +// UpdateIfWorseThan examines the input health type and updates the health if it is worse +// than the stored value +func (rht NnfResourceHealthType) UpdateIfWorseThan(health *NnfResourceHealthType) { + switch rht { + case ResourceWarning: + if *health == ResourceOkay { + *health = ResourceWarning + } + case ResourceCritical: + if *health != ResourceCritical { + *health = ResourceCritical + } + default: + } +} diff --git a/api/v1alpha4/nnf_resource_state_type.go b/api/v1alpha4/nnf_resource_state_type.go new file mode 100644 index 00000000..e701050f --- /dev/null +++ b/api/v1alpha4/nnf_resource_state_type.go @@ -0,0 +1,48 @@ +/* + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License.
+ * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha4 + +// NnfResourceStateType defines valid states that a user can configure an NNF resource +type NnfResourceStateType string + +const ( + // + // Below reflects the current status of a static resource + // + + // ResourceEnable means this static NNF resource should be enabled. + ResourceEnable NnfResourceStateType = "Enable" + + // ResourceDisable means this static NNF resource should be disabled. Not all static resources can be disabled. + ResourceDisable = "Disable" + + // + // Below reflects the current status of a managed (user created) resource + // + + // ResourceCreate means the resource should be created and enabled for operation. For a newly + // created resource, the default state is create. + ResourceCreate NnfResourceStateType = "Create" + + // ResourceDestroy means the resource should be released from the allocated resource pool, and + // this resource and all child resources will be released to the free resource pools + // managed by the system. + ResourceDestroy = "Destroy" +) diff --git a/api/v1alpha4/nnf_resource_status_type.go b/api/v1alpha4/nnf_resource_status_type.go new file mode 100644 index 00000000..c7a4df91 --- /dev/null +++ b/api/v1alpha4/nnf_resource_status_type.go @@ -0,0 +1,152 @@ +/* + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
+ * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha4 + +import ( + dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + + sf "github.com/NearNodeFlash/nnf-ec/pkg/rfsf/pkg/models" +) + +// NnfResourceStatusType is the string that indicates the resource's status +type NnfResourceStatusType string + +const ( + // + // Below reflects the current status of a static resource + // + + // ResourceEnabled means the static NNF resource is enabled and ready to fulfill requests for + // managed resources. + ResourceEnabled NnfResourceStatusType = NnfResourceStatusType(sf.ENABLED_RST) + + // ResourceDisabled means the static NNF resource is present but disabled and not available for use + ResourceDisabled = NnfResourceStatusType(sf.DISABLED_RST) + + // ResourceNotPresent means the static NNF resource is not found; likely because it is disconnected + // or in a powered down state. + ResourceNotPresent = "NotPresent" + + // ResourceOffline means the static NNF resource is offline and the NNF Node cannot communicate with + // the resource. This differs from a NotPresent status in that the device is known to exist. + ResourceOffline = "Offline" + + // + // Below reflects the current status of a managed (user created) resource + // + + // ResourceStarting means the NNF resource is currently in the process of starting - resources + // are being prepared for transition to an Active state.
+ ResourceStarting = NnfResourceStatusType(sf.STARTING_RST) + + // ResourceDeleting means the NNF resource is currently in the process of being deleted - the resource + // and all child resources are being returned to the NNF node's free resources. Upon a successful + // deletion, the resource will be removed from the list of managed NNF resources + ResourceDeleting = "Deleting" + + // ResourceDeleted means the NNF resource was deleted. This reflects the state where the NNF resource does + // not exist in the NNF space, but the resource might still exist in Kubernetes. A resource in + // this state suggests that Kubernetes is unable to delete the object. + ResourceDeleted = "Deleted" + + // ResourceReady means the NNF resource is ready for use. + ResourceReady = "Ready" + + // ResourceFailed means the NNF resource has failed during startup or execution. A failed state is + // an unrecoverable condition. Additional information about the Failed cause can be found by + // looking at the owning resource's Conditions field. A failed resource can only be removed + // by transition to a Delete state. + ResourceFailed = "Failed" + + // ResourceInvalid means the NNF resource configuration is invalid due to an improper format or arrangement + // of listed resource parameters. 
+ ResourceInvalid = "Invalid" +) + +// UpdateIfWorseThan updates the stored status of the resource if the new status is worse than what was stored +func (rst NnfResourceStatusType) UpdateIfWorseThan(status *NnfResourceStatusType) { + switch rst { + case ResourceStarting: + if *status == ResourceReady { + *status = ResourceStarting + } + case ResourceFailed: + if *status != ResourceFailed { + *status = ResourceFailed + } + default: + } +} + +func (rst NnfResourceStatusType) ConvertToDWSResourceStatus() dwsv1alpha2.ResourceStatus { + switch rst { + case ResourceStarting: + return dwsv1alpha2.StartingStatus + case ResourceReady: + return dwsv1alpha2.ReadyStatus + case ResourceDisabled: + return dwsv1alpha2.DisabledStatus + case ResourceNotPresent: + return dwsv1alpha2.NotPresentStatus + case ResourceOffline: + return dwsv1alpha2.OfflineStatus + case ResourceFailed: + return dwsv1alpha2.FailedStatus + default: + return dwsv1alpha2.UnknownStatus + } +} + +// StaticResourceStatus will convert a Swordfish ResourceStatus to the NNF Resource Status. +func StaticResourceStatus(s sf.ResourceStatus) NnfResourceStatusType { + switch s.State { + case sf.STARTING_RST: + return ResourceStarting + case sf.ENABLED_RST: + return ResourceReady + case sf.DISABLED_RST: + return ResourceDisabled + case sf.ABSENT_RST: + return ResourceNotPresent + case sf.UNAVAILABLE_OFFLINE_RST: + return ResourceOffline + } + + panic("Unknown Resource State " + string(s.State)) +} + +// ResourceStatus will convert a Swordfish ResourceStatus to the NNF Resource Status. 
+func ResourceStatus(s sf.ResourceStatus) NnfResourceStatusType { + switch s.State { + case sf.STARTING_RST: + return ResourceStarting + case sf.ENABLED_RST: + return ResourceReady + case sf.DISABLED_RST: + return ResourceDisabled + case sf.ABSENT_RST: + return ResourceNotPresent + case sf.UNAVAILABLE_OFFLINE_RST: + return ResourceOffline + + default: + return ResourceFailed + } +} diff --git a/api/v1alpha4/nnf_resource_type.go b/api/v1alpha4/nnf_resource_type.go new file mode 100644 index 00000000..b7add0b6 --- /dev/null +++ b/api/v1alpha4/nnf_resource_type.go @@ -0,0 +1,33 @@ +/* + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha4 + +// NnfResourceStatus provides common fields that are included in all NNF Resources +type NnfResourceStatus struct { + // ID reflects the NNF Node unique identifier for this NNF Server resource. + ID string `json:"id,omitempty"` + + // Name reflects the common name of this NNF Server resource. 
+ Name string `json:"name,omitempty"` + + Status NnfResourceStatusType `json:"status,omitempty"` + + Health NnfResourceHealthType `json:"health,omitempty"` +} diff --git a/api/v1alpha4/nnfaccess_types.go b/api/v1alpha4/nnfaccess_types.go index d9745d24..2b33e617 100644 --- a/api/v1alpha4/nnfaccess_types.go +++ b/api/v1alpha4/nnfaccess_types.go @@ -1,5 +1,5 @@ /* - * Copyright 2024 Hewlett Packard Enterprise Development LP + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -20,29 +20,79 @@ package v1alpha4 import ( + dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + "github.com/DataWorkflowServices/dws/utils/updater" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. - // NnfAccessSpec defines the desired state of NnfAccess type NnfAccessSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file + // DesiredState is the desired state for the mounts on the client + // +kubebuilder:validation:Enum=mounted;unmounted + DesiredState string `json:"desiredState"` + + // TeardownState is the desired state of the workflow for this NNF Access resource to + // be torn down and deleted. 
+ // +kubebuilder:validation:Enum:=PreRun;PostRun;Teardown + // +kubebuilder:validation:Type:=string + TeardownState dwsv1alpha2.WorkflowState `json:"teardownState"` + + // Target specifies which storage targets the client should mount + // - single: Only one of the storage the client can access + // - all: All of the storage the client can access + // - shared: Multiple clients access the same storage + // +kubebuilder:validation:Enum=single;all;shared + Target string `json:"target"` + + // UserID for the new mount. Currently only used for raw + UserID uint32 `json:"userID"` + + // GroupID for the new mount. Currently only used for raw + GroupID uint32 `json:"groupID"` + + // ClientReference is for a client resource. (DWS) Computes is the only client + // resource type currently supported + ClientReference corev1.ObjectReference `json:"clientReference,omitempty"` + + // MountPath for the storage target on the client + MountPath string `json:"mountPath,omitempty"` + + // MakeClientMounts determines whether the ClientMount resources are made, or if only + // the access list on the NnfNodeBlockStorage is updated + // +kubebuilder:default=true + MakeClientMounts bool `json:"makeClientMounts"` + + // MountPathPrefix to mount the storage target on the client when there is + // more than one mount on a client - // Foo is an example field of NnfAccess. 
Edit nnfaccess_types.go to remove/update - Foo string `json:"foo,omitempty"` + MountPathPrefix string `json:"mountPathPrefix,omitempty"` + + // StorageReference is the NnfStorage reference + StorageReference corev1.ObjectReference `json:"storageReference"` } // NnfAccessStatus defines the observed state of NnfAccess type NnfAccessStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file + // State is the current state + // +kubebuilder:validation:Enum=mounted;unmounted + State string `json:"state"` + + // Ready signifies whether status.state has been achieved + Ready bool `json:"ready"` + + dwsv1alpha2.ResourceError `json:",inline"` } -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:storageversion +//+kubebuilder:printcolumn:name="DESIREDSTATE",type="string",JSONPath=".spec.desiredState",description="The desired state" +//+kubebuilder:printcolumn:name="STATE",type="string",JSONPath=".status.state",description="The current state" +//+kubebuilder:printcolumn:name="READY",type="boolean",JSONPath=".status.ready",description="Whether the state has been achieved" +//+kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" +//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" // NnfAccess is the Schema for the nnfaccesses API type NnfAccess struct { @@ -53,7 +103,11 @@ type NnfAccess struct { Status NnfAccessStatus `json:"status,omitempty"` } -// +kubebuilder:object:root=true +func (a *NnfAccess) GetStatus() updater.Status[*NnfAccessStatus] { + return &a.Status +} + +//+kubebuilder:object:root=true // NnfAccessList contains a list of NnfAccess type NnfAccessList struct { @@ -62,6 +116,16 @@ type NnfAccessList struct { Items []NnfAccess `json:"items"` } +func (n *NnfAccessList) GetObjectList() 
[]client.Object { + objectList := []client.Object{} + + for i := range n.Items { + objectList = append(objectList, &n.Items[i]) + } + + return objectList +} + func init() { SchemeBuilder.Register(&NnfAccess{}, &NnfAccessList{}) } diff --git a/api/v1alpha3/nnfaccess_webhook.go b/api/v1alpha4/nnfaccess_webhook.go similarity index 98% rename from api/v1alpha3/nnfaccess_webhook.go rename to api/v1alpha4/nnfaccess_webhook.go index 92a32437..a1eb93fd 100644 --- a/api/v1alpha3/nnfaccess_webhook.go +++ b/api/v1alpha4/nnfaccess_webhook.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha3 +package v1alpha4 import ( ctrl "sigs.k8s.io/controller-runtime" diff --git a/api/v1alpha3/nnfaccess_webhook_test.go b/api/v1alpha4/nnfaccess_webhook_test.go similarity index 98% rename from api/v1alpha3/nnfaccess_webhook_test.go rename to api/v1alpha4/nnfaccess_webhook_test.go index eff625b4..e0e37fca 100644 --- a/api/v1alpha3/nnfaccess_webhook_test.go +++ b/api/v1alpha4/nnfaccess_webhook_test.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha3 +package v1alpha4 import ( . "github.com/onsi/ginkgo/v2" diff --git a/api/v1alpha4/nnfcontainerprofile_types.go b/api/v1alpha4/nnfcontainerprofile_types.go index 3bab739d..ed22ffaf 100644 --- a/api/v1alpha4/nnfcontainerprofile_types.go +++ b/api/v1alpha4/nnfcontainerprofile_types.go @@ -1,5 +1,5 @@ /* - * Copyright 2024 Hewlett Packard Enterprise Development LP + * Copyright 2023-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -20,40 +20,114 @@ package v1alpha4 import ( + mpiv2beta1 "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. 
Any new fields you add must have json tags for the fields to be serialized. +const ( + ContainerLabel = "nnf.cray.hpe.com/container" + ContainerUser = "user" + ContainerMPIUser = "mpiuser" +) // NnfContainerProfileSpec defines the desired state of NnfContainerProfile -type NnfContainerProfileSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file +type NnfContainerProfileData struct { + // Pinned is true if this instance is an immutable copy + // +kubebuilder:default:=false + Pinned bool `json:"pinned,omitempty"` + + // List of possible filesystems supported by this container profile + Storages []NnfContainerProfileStorage `json:"storages,omitempty"` + + // Containers are launched in the PreRun state. Allow this many seconds for the containers to + // start before declaring an error to the workflow. + // Defaults to 300 if not set. A value of 0 disables this behavior. + // +kubebuilder:default:=300 + // +kubebuilder:validation:Minimum:=0 + PreRunTimeoutSeconds *int64 `json:"preRunTimeoutSeconds,omitempty"` + + // Containers are expected to complete in the PostRun State. Allow this many seconds for the + // containers to exit before declaring an error to the workflow. + // Defaults to 300 if not set. A value of 0 disables this behavior. + // +kubebuilder:default:=300 + // +kubebuilder:validation:Minimum:=0 + PostRunTimeoutSeconds *int64 `json:"postRunTimeoutSeconds,omitempty"` + + // Specifies the number of times a container will be retried upon a failure. A new pod is + // deployed on each retry. Defaults to 6 by kubernetes itself and must be set. A value of 0 + // disables retries. + // +kubebuilder:validation:Minimum:=0 + // +kubebuilder:default:=6 + RetryLimit int32 `json:"retryLimit"` + + // UserID specifies the user ID that is allowed to use this profile. If this is specified, only + // Workflows that have a matching user ID can select this profile.
+ UserID *uint32 `json:"userID,omitempty"` - // Foo is an example field of NnfContainerProfile. Edit nnfcontainerprofile_types.go to remove/update - Foo string `json:"foo,omitempty"` + // GroupID specifies the group ID that is allowed to use this profile. If this is specified, + // only Workflows that have a matching group ID can select this profile. + GroupID *uint32 `json:"groupID,omitempty"` + + // Number of ports to open for communication with the user container. These ports are opened on + // the targeted NNF nodes and can be accessed outside of the k8s cluster (e.g. compute nodes). + // The requested ports are made available as environment variables inside the container and in + // the DWS workflow (NNF_CONTAINER_PORTS). + NumPorts int32 `json:"numPorts,omitempty"` + + // Spec to define the containers created from this profile. This is used for non-MPI containers. + // Refer to the K8s documentation for `PodSpec` for more definition: + // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec + // Either this or MPISpec must be provided, but not both. + Spec *corev1.PodSpec `json:"spec,omitempty"` + + // MPIJobSpec to define the MPI containers created from this profile. This functionality is + // provided via mpi-operator, a 3rd party tool to assist in running MPI applications across + // worker containers. + // Either this or Spec must be provided, but not both. + // + // All the fields defined drive mpi-operator behavior. See the type definition of MPISpec for + // more detail: + // https://github.com/kubeflow/mpi-operator/blob/v0.4.0/pkg/apis/kubeflow/v2beta1/types.go#L137 + // + // Note: most of these fields are fully customizable with a few exceptions. 
These fields are + // overridden by NNF software to ensure proper behavior to interface with the DWS workflow + // - Replicas + // - RunPolicy.BackoffLimit (this is set above by `RetryLimit`) + // - Worker/Launcher.RestartPolicy + MPISpec *mpiv2beta1.MPIJobSpec `json:"mpiSpec,omitempty"` } -// NnfContainerProfileStatus defines the observed state of NnfContainerProfile -type NnfContainerProfileStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file +// NnfContainerProfileStorage defines the mount point information that will be available to the +// container +type NnfContainerProfileStorage struct { + // Name specifies the name of the mounted filesystem; must match the user supplied #DW directive + Name string `json:"name"` + + // Optional designates that this filesystem is available to be mounted, but can be ignored by + // the user not supplying this filesystem in the #DW directives + //+kubebuilder:default:=false + Optional bool `json:"optional"` + + // For DW_GLOBAL_ (global lustre) storages, the access mode must match what is configured in + // the LustreFilesystem resource for the namespace. Defaults to `ReadWriteMany` for global + // lustre, otherwise empty. 
+ PVCMode corev1.PersistentVolumeAccessMode `json:"pvcMode,omitempty"` } // +kubebuilder:object:root=true -// +kubebuilder:subresource:status +// +kubebuilder:storageversion // NnfContainerProfile is the Schema for the nnfcontainerprofiles API type NnfContainerProfile struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec NnfContainerProfileSpec `json:"spec,omitempty"` - Status NnfContainerProfileStatus `json:"status,omitempty"` + Data NnfContainerProfileData `json:"data"` } // +kubebuilder:object:root=true +// +kubebuilder:storageversion // NnfContainerProfileList contains a list of NnfContainerProfile type NnfContainerProfileList struct { diff --git a/api/v1alpha3/nnfcontainerprofile_webhook.go b/api/v1alpha4/nnfcontainerprofile_webhook.go similarity index 99% rename from api/v1alpha3/nnfcontainerprofile_webhook.go rename to api/v1alpha4/nnfcontainerprofile_webhook.go index 17580ee6..64ec5556 100644 --- a/api/v1alpha3/nnfcontainerprofile_webhook.go +++ b/api/v1alpha4/nnfcontainerprofile_webhook.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha3 +package v1alpha4 import ( "fmt" diff --git a/api/v1alpha3/nnfcontainerprofile_webhook_test.go b/api/v1alpha4/nnfcontainerprofile_webhook_test.go similarity index 99% rename from api/v1alpha3/nnfcontainerprofile_webhook_test.go rename to api/v1alpha4/nnfcontainerprofile_webhook_test.go index 400a8f9a..a2ceeb2e 100644 --- a/api/v1alpha3/nnfcontainerprofile_webhook_test.go +++ b/api/v1alpha4/nnfcontainerprofile_webhook_test.go @@ -17,7 +17,7 @@ * limitations under the License. 
*/ -package v1alpha3 +package v1alpha4 import ( "context" diff --git a/api/v1alpha4/nnfdatamovement_types.go b/api/v1alpha4/nnfdatamovement_types.go index 6edcc131..d1cb6ec7 100644 --- a/api/v1alpha4/nnfdatamovement_types.go +++ b/api/v1alpha4/nnfdatamovement_types.go @@ -1,5 +1,5 @@ /* - * Copyright 2024 Hewlett Packard Enterprise Development LP + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -20,29 +20,210 @@ package v1alpha4 import ( + dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. +const ( + // The required namespace for an NNF Data Movement operation. This is for system wide (lustre) + // data movement. Individual nodes may also perform data movement in which case they use the + // NNF Node Name as the namespace. + DataMovementNamespace = "nnf-dm-system" + + // The namespace for NnfDataMovementProfiles that are not pinned. + DataMovementProfileNamespace = "nnf-system" +) // NnfDataMovementSpec defines the desired state of NnfDataMovement type NnfDataMovementSpec struct { // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster // Important: Run "make" to regenerate code after modifying this file - // Foo is an example field of NnfDataMovement. 
Edit nnfdatamovement_types.go to remove/update - Foo string `json:"foo,omitempty"` + // Source describes the source of the data movement operation + Source *NnfDataMovementSpecSourceDestination `json:"source,omitempty"` + + // Destination describes the destination of the data movement operation + Destination *NnfDataMovementSpecSourceDestination `json:"destination,omitempty"` + + // User Id specifies the user ID for the data movement operation. This value is used + // in conjunction with the group ID to ensure the user has valid permissions to perform + // the data movement operation. + UserId uint32 `json:"userId,omitempty"` + + // Group Id specifies the group ID for the data movement operation. This value is used + // in conjunction with the user ID to ensure the user has valid permissions to perform + // the data movement operation. + GroupId uint32 `json:"groupId,omitempty"` + + // Set to true if the data movement operation should be canceled. + // +kubebuilder:default:=false + Cancel bool `json:"cancel,omitempty"` + + // ProfileReference is an object reference to an NnfDataMovementProfile that is used to + // configure data movement. If empty, the default profile is used. + ProfileReference corev1.ObjectReference `json:"profileReference,omitempty"` + + // User defined configuration on how data movement should be performed. This overrides the + // configuration defined in the supplied ProfileReference/NnfDataMovementProfile. These values + // are typically set by the Copy Offload API. 
+ UserConfig *NnfDataMovementConfig `json:"userConfig,omitempty"` +} + +// NnfDataMovementSpecSourceDestination defines the desired source or destination of data movement +type NnfDataMovementSpecSourceDestination struct { + + // Path describes the location of the user data relative to the storage instance + Path string `json:"path,omitempty"` + + // Storage describes the storage backing this data movement specification; Storage can reference + // either NNF storage or global Lustre storage depending on the object references Kind field. + StorageReference corev1.ObjectReference `json:"storageReference,omitempty"` +} + +// NnfDataMovementConfig provides a way for a user to override the data movement behavior on a +// per DM basis. +type NnfDataMovementConfig struct { + + // Fake the Data Movement operation. The system "performs" Data Movement but the command to do so + // is trivial. This means a Data Movement request is still submitted but the IO is skipped. + // +kubebuilder:default:=false + Dryrun bool `json:"dryrun,omitempty"` + + // Extra options to pass to the mpirun command (used to perform data movement). + MpirunOptions string `json:"mpirunOptions,omitempty"` + + // Extra options to pass to the dcp command (used to perform data movement). + DcpOptions string `json:"dcpOptions,omitempty"` + + // If true, enable the command's stdout to be saved in the log when the command completes + // successfully. On failure, the output is always logged. + // Note: Enabling this option may degrade performance. + // +kubebuilder:default:=false + LogStdout bool `json:"logStdout,omitempty"` + + // Similar to LogStdout, store the command's stdout in Status.Message when the command completes + // successfully. On failure, the output is always stored. + // Note: Enabling this option may degrade performance. + // +kubebuilder:default:=false + StoreStdout bool `json:"storeStdout,omitempty"` + + // The number of slots specified in the MPI hostfile. 
A value of 0 disables the use of slots in + // the hostfile. Nil will defer to the value specified in the NnfDataMovementProfile. + Slots *int `json:"slots,omitempty"` + + // The number of max_slots specified in the MPI hostfile. A value of 0 disables the use of slots + // in the hostfile. Nil will defer to the value specified in the NnfDataMovementProfile. + MaxSlots *int `json:"maxSlots,omitempty"` +} + +// NnfDataMovementCommandStatus defines the observed status of the underlying data movement +// command (MPI File Utils' `dcp` command). +type NnfDataMovementCommandStatus struct { + // The command that was executed during data movement. + Command string `json:"command,omitempty"` + + // ElapsedTime reflects the elapsed time since the underlying data movement command started. + ElapsedTime metav1.Duration `json:"elapsedTime,omitempty"` + + // ProgressPercentage refects the progress of the underlying data movement command as captured from + // standard output. A best effort is made to parse the command output as a percentage. If no + // progress has yet to be measured than this field is omitted. If the latest command output does + // not contain a valid percentage, then the value is unchanged from the previously parsed value. + ProgressPercentage *int32 `json:"progress,omitempty"` + + // LastMessage reflects the last message received over standard output or standard error as + // captured by the underlying data movement command. + LastMessage string `json:"lastMessage,omitempty"` + + // LastMessageTime reflects the time at which the last message was received over standard output + // or standard error by the underlying data movement command. + LastMessageTime metav1.MicroTime `json:"lastMessageTime,omitempty"` + + // Seconds is parsed from the dcp output when the command is finished. + Seconds string `json:"seconds,omitempty"` + + // Items is parsed from the dcp output when the command is finished. 
This is a total of + // the number of directories, files, and links that dcp copied. + Items *int32 `json:"items,omitempty"` + + // Directories is parsed from the dcp output when the command is finished. This is the number of + // directories that dcp copied. Note: This value may be inflated due to NNF index mount + // directories when copying from XFS or GFS2 filesystems. + Directories *int32 `json:"directories,omitempty"` + + // Files is parsed from the dcp output when the command is finished. This is the number of files + // that dcp copied. + Files *int32 `json:"files,omitempty"` + + // Links is parsed from the dcp output when the command is finished. This is the number of links + // that dcp copied. + Links *int32 `json:"links,omitempty"` + + // Data is parsed from the dcp output when the command is finished. This is the total amount of + // data copied by dcp. + Data string `json:"data,omitempty"` + + // Rate is parsed from the dcp output when the command is finished. This is transfer rate of the + // data copied by dcp. + Rate string `json:"rate,omitempty"` } // NnfDataMovementStatus defines the observed state of NnfDataMovement type NnfDataMovementStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file + // Current state of data movement. + // +kubebuilder:validation:Enum=Starting;Running;Finished + State string `json:"state,omitempty"` + + // Status of the current state. + // +kubebuilder:validation:Enum=Success;Failed;Invalid;Cancelled + Status string `json:"status,omitempty"` + + // Message contains any text that explains the Status. If Data Movement failed or storeStdout is + // enabled, this will contain the command's output. + Message string `json:"message,omitempty"` + + // StartTime reflects the time at which the Data Movement operation started. 
+ StartTime *metav1.MicroTime `json:"startTime,omitempty"` + + // EndTime reflects the time at which the Data Movement operation ended. + EndTime *metav1.MicroTime `json:"endTime,omitempty"` + + // Restarts contains the number of restarts of the Data Movement operation. + Restarts int `json:"restarts,omitempty"` + + // CommandStatus reflects the current status of the underlying Data Movement command + // as it executes. The command status is polled at a certain frequency to avoid excessive + // updates to the Data Movement resource. + CommandStatus *NnfDataMovementCommandStatus `json:"commandStatus,omitempty"` + + dwsv1alpha2.ResourceError `json:",inline"` } -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status +// Types describing the various data movement status conditions. +const ( + DataMovementConditionTypeStarting = "Starting" + DataMovementConditionTypeRunning = "Running" + DataMovementConditionTypeFinished = "Finished" +) + +// Reasons describing the various data movement status conditions. 
Must be +// in CamelCase format (see metav1.Condition) +const ( + DataMovementConditionReasonSuccess = "Success" + DataMovementConditionReasonFailed = "Failed" + DataMovementConditionReasonInvalid = "Invalid" + DataMovementConditionReasonCancelled = "Cancelled" +) + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:storageversion +//+kubebuilder:printcolumn:name="STATE",type="string",JSONPath=".status.state",description="Current state" +//+kubebuilder:printcolumn:name="STATUS",type="string",JSONPath=".status.status",description="Status of current state" +//+kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" +//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" // NnfDataMovement is the Schema for the nnfdatamovements API type NnfDataMovement struct { @@ -53,7 +234,7 @@ type NnfDataMovement struct { Status NnfDataMovementStatus `json:"status,omitempty"` } -// +kubebuilder:object:root=true +//+kubebuilder:object:root=true // NnfDataMovementList contains a list of NnfDataMovement type NnfDataMovementList struct { @@ -62,6 +243,47 @@ type NnfDataMovementList struct { Items []NnfDataMovement `json:"items"` } +func (n *NnfDataMovementList) GetObjectList() []client.Object { + objectList := []client.Object{} + + for i := range n.Items { + objectList = append(objectList, &n.Items[i]) + } + + return objectList +} + +const ( + // DataMovementTeardownStateLabel is the label applied to Data Movement and related resources that describes + // the workflow state when the resource is no longer need and can be safely deleted. + DataMovementTeardownStateLabel = "nnf.cray.hpe.com/teardown_state" + + // DataMovementInitiatorLabel is the label applied to Data Movement resources that describes the origin of + // data movement request. This would be from a copy_in/copy_out directive or from a compute node via the + // Copy Offload API (i.e. nnf-dm daemon). 
+ DataMovementInitiatorLabel = "dm.cray.hpe.com/initiator" +) + +func AddDataMovementTeardownStateLabel(object metav1.Object, state dwsv1alpha2.WorkflowState) { + labels := object.GetLabels() + if labels == nil { + labels = make(map[string]string) + } + + labels[DataMovementTeardownStateLabel] = string(state) + object.SetLabels(labels) +} + +func AddDataMovementInitiatorLabel(object metav1.Object, initiator string) { + labels := object.GetLabels() + if labels == nil { + labels = make(map[string]string) + } + + labels[DataMovementInitiatorLabel] = initiator + object.SetLabels(labels) +} + func init() { SchemeBuilder.Register(&NnfDataMovement{}, &NnfDataMovementList{}) } diff --git a/api/v1alpha3/nnfdatamovement_webhook.go b/api/v1alpha4/nnfdatamovement_webhook.go similarity index 98% rename from api/v1alpha3/nnfdatamovement_webhook.go rename to api/v1alpha4/nnfdatamovement_webhook.go index d3e5d772..65756225 100644 --- a/api/v1alpha3/nnfdatamovement_webhook.go +++ b/api/v1alpha4/nnfdatamovement_webhook.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha3 +package v1alpha4 import ( ctrl "sigs.k8s.io/controller-runtime" diff --git a/api/v1alpha3/nnfdatamovement_webhook_test.go b/api/v1alpha4/nnfdatamovement_webhook_test.go similarity index 98% rename from api/v1alpha3/nnfdatamovement_webhook_test.go rename to api/v1alpha4/nnfdatamovement_webhook_test.go index 6c2e85db..5c7f9c39 100644 --- a/api/v1alpha3/nnfdatamovement_webhook_test.go +++ b/api/v1alpha4/nnfdatamovement_webhook_test.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha3 +package v1alpha4 import ( . 
"github.com/onsi/ginkgo/v2" diff --git a/api/v1alpha4/nnfdatamovementmanager_types.go b/api/v1alpha4/nnfdatamovementmanager_types.go index 3064dbf0..9a3b2683 100644 --- a/api/v1alpha4/nnfdatamovementmanager_types.go +++ b/api/v1alpha4/nnfdatamovementmanager_types.go @@ -1,5 +1,5 @@ /* - * Copyright 2024 Hewlett Packard Enterprise Development LP + * Copyright 2022-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -20,7 +20,19 @@ package v1alpha4 import ( + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/DataWorkflowServices/dws/utils/updater" +) + +const ( + DataMovementWorkerLabel = "dm.cray.hpe.com/worker" + + // The name of the expected Data Movement manager. This is to ensure Data Movement is ready in + // the DataIn/DataOut stages before attempting data movement operations. + DataMovementManagerName = "nnf-dm-manager-controller-manager" ) // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! @@ -31,18 +43,41 @@ type NnfDataMovementManagerSpec struct { // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster // Important: Run "make" to regenerate code after modifying this file - // Foo is an example field of NnfDataMovementManager. Edit nnfdatamovementmanager_types.go to remove/update - Foo string `json:"foo,omitempty"` + // Selector defines the pod selector used in scheduling the worker nodes. This value is duplicated + // to the template.spec.metadata.labels to satisfy the requirements of the worker's Daemon Set. + Selector metav1.LabelSelector `json:"selector"` + + // Template defines the pod template that is used for the basis of the worker Daemon Set that + // manages the per node data movement operations. 
+ Template corev1.PodTemplateSpec `json:"template"` + + // UpdateStrategy defines the UpdateStrategy that is used for the basis of the worker Daemon Set + // that manages the per node data movement operations. + UpdateStrategy appsv1.DaemonSetUpdateStrategy `json:"updateStrategy"` + + // Host Path defines the directory location of shared mounts on an individual worker node. + HostPath string `json:"hostPath"` + + // Mount Path defines the location within the container at which the Host Path volume should be mounted. + MountPath string `json:"mountPath"` } // NnfDataMovementManagerStatus defines the observed state of NnfDataMovementManager type NnfDataMovementManagerStatus struct { // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster // Important: Run "make" to regenerate code after modifying this file + + // Ready indicates that the Data Movement Manager has achieved the desired readiness state + // and all managed resources are initialized. + // +kubebuilder:default:=false + Ready bool `json:"ready"` } -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:storageversion +//+kubebuilder:printcolumn:name="READY",type="boolean",JSONPath=".status.ready",description="True if manager readied all resoures" +//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" // NnfDataMovementManager is the Schema for the nnfdatamovementmanagers API type NnfDataMovementManager struct { @@ -53,7 +88,11 @@ type NnfDataMovementManager struct { Status NnfDataMovementManagerStatus `json:"status,omitempty"` } -// +kubebuilder:object:root=true +func (m *NnfDataMovementManager) GetStatus() updater.Status[*NnfDataMovementManagerStatus] { + return &m.Status +} + +//+kubebuilder:object:root=true // NnfDataMovementManagerList contains a list of NnfDataMovementManager type NnfDataMovementManagerList struct { diff --git 
a/api/v1alpha3/nnfdatamovementmanager_webhook.go b/api/v1alpha4/nnfdatamovementmanager_webhook.go similarity index 98% rename from api/v1alpha3/nnfdatamovementmanager_webhook.go rename to api/v1alpha4/nnfdatamovementmanager_webhook.go index 5bfc914e..2cb0cdad 100644 --- a/api/v1alpha3/nnfdatamovementmanager_webhook.go +++ b/api/v1alpha4/nnfdatamovementmanager_webhook.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha3 +package v1alpha4 import ( ctrl "sigs.k8s.io/controller-runtime" diff --git a/api/v1alpha3/nnfdatamovementmanager_webhook_test.go b/api/v1alpha4/nnfdatamovementmanager_webhook_test.go similarity index 98% rename from api/v1alpha3/nnfdatamovementmanager_webhook_test.go rename to api/v1alpha4/nnfdatamovementmanager_webhook_test.go index 013add9e..aa469c77 100644 --- a/api/v1alpha3/nnfdatamovementmanager_webhook_test.go +++ b/api/v1alpha4/nnfdatamovementmanager_webhook_test.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha3 +package v1alpha4 import ( . "github.com/onsi/ginkgo/v2" diff --git a/api/v1alpha4/nnfdatamovementprofile_types.go b/api/v1alpha4/nnfdatamovementprofile_types.go index ce1cfb09..59fb2054 100644 --- a/api/v1alpha4/nnfdatamovementprofile_types.go +++ b/api/v1alpha4/nnfdatamovementprofile_types.go @@ -23,37 +23,94 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
+// NnfDataMovementProfileData defines the desired state of NnfDataMovementProfile +type NnfDataMovementProfileData struct { -// NnfDataMovementProfileSpec defines the desired state of NnfDataMovementProfile -type NnfDataMovementProfileSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file + // Default is true if this instance is the default resource to use + // +kubebuilder:default:=false + Default bool `json:"default,omitempty"` - // Foo is an example field of NnfDataMovementProfile. Edit nnfdatamovementprofile_types.go to remove/update - Foo string `json:"foo,omitempty"` -} + // Pinned is true if this instance is an immutable copy + // +kubebuilder:default:=false + Pinned bool `json:"pinned,omitempty"` + + // Slots is the number of slots specified in the MPI hostfile. A value of 0 disables the use of + // slots in the hostfile. The hostfile is used for both `statCommand` and `Command`. + // +kubebuilder:default:=8 + // +kubebuilder:validation:Minimum:=0 + Slots int `json:"slots"` + + // MaxSlots is the number of max_slots specified in the MPI hostfile. A value of 0 disables the + // use of max_slots in the hostfile. The hostfile is used for both `statCommand` and `Command`. + // +kubebuilder:default:=0 + // +kubebuilder:validation:Minimum:=0 + MaxSlots int `json:"maxSlots"` + + // Command to execute to perform data movement. $VARS are replaced by the nnf software and must + // be present in the command. + // Available $VARS: + // HOSTFILE: hostfile that is created and used for mpirun. Contains a list of hosts and the + // slots/max_slots for each host. 
This hostfile is created at `/tmp//hostfile` + // UID: User ID that is inherited from the Workflow + // GID: Group ID that is inherited from the Workflow + // SRC: source for the data movement + // DEST destination for the data movement + // +kubebuilder:default:="ulimit -n 2048 && mpirun --allow-run-as-root --hostfile $HOSTFILE dcp --progress 1 --uid $UID --gid $GID $SRC $DEST" + Command string `json:"command"` + + // If true, enable the command's stdout to be saved in the log when the command completes + // successfully. On failure, the output is always logged. + // +kubebuilder:default:=false + LogStdout bool `json:"logStdout,omitempty"` + + // Similar to logStdout, store the command's stdout in Status.Message when the command completes + // successfully. On failure, the output is always stored. + // +kubebuilder:default:=false + StoreStdout bool `json:"storeStdout,omitempty"` + + // NnfDataMovement resources have the ability to collect and store the progress percentage and the + // last few lines of output in the CommandStatus field. This number is used for the interval to collect + // the progress data. `dcp --progress N` must be included in the data movement command in order for + // progress to be collected. A value of 0 disables this functionality. + // +kubebuilder:default:=5 + // +kubebuilder:validation:Minimum:=0 + ProgressIntervalSeconds int `json:"progressIntervalSeconds,omitempty"` + + // CreateDestDir will ensure that the destination directory exists before performing data + // movement. This will cause a number of stat commands to determine the source and destination + // file types, so that the correct pathing for the destination can be determined. Then, a mkdir + // is issued. 
+ // +kubebuilder:default:=true + CreateDestDir bool `json:"createDestDir"` -// NnfDataMovementProfileStatus defines the observed state of NnfDataMovementProfile -type NnfDataMovementProfileStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file + // If CreateDestDir is true, then use StatCommand to perform the stat commands. + // Use setpriv to stat the path with the specified UID/GID. + // Available $VARS: + // HOSTFILE: hostfile that is created and used for mpirun. Contains a list of hosts and the + // slots/max_slots for each host. This hostfile is created at + // `/tmp//hostfile`. This is the same hostfile used as the one for Command. + // UID: User ID that is inherited from the Workflow + // GID: Group ID that is inherited from the Workflow + // PATH: Path to stat + // +kubebuilder:default:="mpirun --allow-run-as-root -np 1 --hostfile $HOSTFILE -- setpriv --euid $UID --egid $GID --clear-groups stat --cached never -c '%F' $PATH" + StatCommand string `json:"statCommand"` } // +kubebuilder:object:root=true -// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="DEFAULT",type="boolean",JSONPath=".data.default",description="True if this is the default instance" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" // NnfDataMovementProfile is the Schema for the nnfdatamovementprofiles API type NnfDataMovementProfile struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec NnfDataMovementProfileSpec `json:"spec,omitempty"` - Status NnfDataMovementProfileStatus `json:"status,omitempty"` + Data NnfDataMovementProfileData `json:"data,omitempty"` } // +kubebuilder:object:root=true +// +kubebuilder:storageversion // NnfDataMovementProfileList contains a list of NnfDataMovementProfile type NnfDataMovementProfileList struct { diff --git 
a/api/v1alpha3/nnfdatamovementprofile_webhook.go b/api/v1alpha4/nnfdatamovementprofile_webhook.go similarity index 99% rename from api/v1alpha3/nnfdatamovementprofile_webhook.go rename to api/v1alpha4/nnfdatamovementprofile_webhook.go index 8e3826c1..4b4223f7 100644 --- a/api/v1alpha3/nnfdatamovementprofile_webhook.go +++ b/api/v1alpha4/nnfdatamovementprofile_webhook.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha3 +package v1alpha4 import ( "fmt" diff --git a/api/v1alpha3/nnfdatamovementprofile_webhook_test.go b/api/v1alpha4/nnfdatamovementprofile_webhook_test.go similarity index 99% rename from api/v1alpha3/nnfdatamovementprofile_webhook_test.go rename to api/v1alpha4/nnfdatamovementprofile_webhook_test.go index fde83d75..a9f04f67 100644 --- a/api/v1alpha3/nnfdatamovementprofile_webhook_test.go +++ b/api/v1alpha4/nnfdatamovementprofile_webhook_test.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha3 +package v1alpha4 import ( "context" diff --git a/api/v1alpha4/nnflustremgt_types.go b/api/v1alpha4/nnflustremgt_types.go index 4143f680..60212924 100644 --- a/api/v1alpha4/nnflustremgt_types.go +++ b/api/v1alpha4/nnflustremgt_types.go @@ -20,31 +20,58 @@ package v1alpha4 import ( + dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + "github.com/DataWorkflowServices/dws/utils/updater" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
- // NnfLustreMGTSpec defines the desired state of NnfLustreMGT type NnfLustreMGTSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file + // Addresses is the list of LNet addresses for the MGT + Addresses []string `json:"addresses"` + + // FsNameBlackList is a list of fsnames that can't be used. This may be + // necessary if the MGT hosts file systems external to Rabbit + FsNameBlackList []string `json:"fsNameBlackList,omitempty"` + + // FsNameStart is the starting fsname to be used + // +kubebuilder:validation:MaxLength:=8 + // +kubebuilder:validation:MinLength:=8 + FsNameStart string `json:"fsNameStart,omitempty"` + + // FsNameStartReference can be used to add a configmap where the starting fsname is + // stored. If this reference is set, it takes precendence over FsNameStart. The configmap + // will be updated with the next available fsname anytime an fsname is used. + FsNameStartReference corev1.ObjectReference `json:"fsNameStartReference,omitempty"` - // Foo is an example field of NnfLustreMGT. 
Edit nnflustremgt_types.go to remove/update - Foo string `json:"foo,omitempty"` + // ClaimList is the list of currently in use fsnames + ClaimList []corev1.ObjectReference `json:"claimList,omitempty"` } -// NnfLustreMGTStatus defines the observed state of NnfLustreMGT +// NnfLustreMGTStatus defines the current state of NnfLustreMGT type NnfLustreMGTStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file + // FsNameNext is the next available fsname that hasn't been used + // +kubebuilder:validation:MaxLength:=8 + // +kubebuilder:validation:MinLength:=8 + FsNameNext string `json:"fsNameNext,omitempty"` + + // ClaimList is the list of currently in use fsnames + ClaimList []NnfLustreMGTStatusClaim `json:"claimList,omitempty"` + + dwsv1alpha2.ResourceError `json:",inline"` +} + +type NnfLustreMGTStatusClaim struct { + Reference corev1.ObjectReference `json:"reference,omitempty"` + FsName string `json:"fsname,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status - -// NnfLustreMGT is the Schema for the nnflustremgts API +// +kubebuilder:storageversion +// NnfLustreMGT is the Schema for the nnfstorageprofiles API type NnfLustreMGT struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -53,7 +80,11 @@ type NnfLustreMGT struct { Status NnfLustreMGTStatus `json:"status,omitempty"` } -// +kubebuilder:object:root=true +func (a *NnfLustreMGT) GetStatus() updater.Status[*NnfLustreMGTStatus] { + return &a.Status +} + +//+kubebuilder:object:root=true // NnfLustreMGTList contains a list of NnfLustreMGT type NnfLustreMGTList struct { @@ -62,6 +93,16 @@ type NnfLustreMGTList struct { Items []NnfLustreMGT `json:"items"` } +func (n *NnfLustreMGTList) GetObjectList() []client.Object { + objectList := []client.Object{} + + for i := range n.Items { + objectList = append(objectList, &n.Items[i]) + } + + return objectList +} + 
func init() { SchemeBuilder.Register(&NnfLustreMGT{}, &NnfLustreMGTList{}) } diff --git a/api/v1alpha3/nnflustremgt_webhook.go b/api/v1alpha4/nnflustremgt_webhook.go similarity index 98% rename from api/v1alpha3/nnflustremgt_webhook.go rename to api/v1alpha4/nnflustremgt_webhook.go index 65fcd940..a037f7ac 100644 --- a/api/v1alpha3/nnflustremgt_webhook.go +++ b/api/v1alpha4/nnflustremgt_webhook.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha3 +package v1alpha4 import ( ctrl "sigs.k8s.io/controller-runtime" diff --git a/api/v1alpha3/nnflustremgt_webhook_test.go b/api/v1alpha4/nnflustremgt_webhook_test.go similarity index 98% rename from api/v1alpha3/nnflustremgt_webhook_test.go rename to api/v1alpha4/nnflustremgt_webhook_test.go index d8e5c672..e0cf9b53 100644 --- a/api/v1alpha3/nnflustremgt_webhook_test.go +++ b/api/v1alpha4/nnflustremgt_webhook_test.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha3 +package v1alpha4 import ( . "github.com/onsi/ginkgo/v2" diff --git a/api/v1alpha4/nnfnode_types.go b/api/v1alpha4/nnfnode_types.go index 85739d26..5d4e9a93 100644 --- a/api/v1alpha4/nnfnode_types.go +++ b/api/v1alpha4/nnfnode_types.go @@ -1,5 +1,5 @@ /* - * Copyright 2024 Hewlett Packard Enterprise Development LP + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -20,31 +20,92 @@ package v1alpha4 import ( + "github.com/DataWorkflowServices/dws/utils/updater" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
-// NnfNodeSpec defines the desired state of NnfNode +// NnfNodeSpec defines the desired state of NNF Node type NnfNodeSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster // Important: Run "make" to regenerate code after modifying this file - // Foo is an example field of NnfNode. Edit nnfnode_types.go to remove/update - Foo string `json:"foo,omitempty"` + // The unique name for this NNF Node + Name string `json:"name,omitempty"` + + // Pod name for this NNF Node + Pod string `json:"pod,omitempty"` + + // State reflects the desired state of this NNF Node resource + // +kubebuilder:validation:Enum=Enable;Disable + State NnfResourceStateType `json:"state"` } -// NnfNodeStatus defines the observed state of NnfNode +// NnfNodeStatus defines the observed status of NNF Node type NnfNodeStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster // Important: Run "make" to regenerate code after modifying this file + + // Status reflects the current status of the NNF Node + Status NnfResourceStatusType `json:"status,omitempty"` + + Health NnfResourceHealthType `json:"health,omitempty"` + + // Fenced is true when the NNF Node is fenced by the STONITH agent, and false otherwise. 
+ Fenced bool `json:"fenced,omitempty"` + + // LNetNid is the LNet address for the NNF node + LNetNid string `json:"lnetNid,omitempty"` + + Capacity int64 `json:"capacity,omitempty"` + CapacityAllocated int64 `json:"capacityAllocated,omitempty"` + + Servers []NnfServerStatus `json:"servers,omitempty"` + + Drives []NnfDriveStatus `json:"drives,omitempty"` +} + +// NnfServerStatus defines the observed status of servers connected to this NNF Node +type NnfServerStatus struct { + Hostname string `json:"hostname,omitempty"` + + NnfResourceStatus `json:",inline"` } -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status +// NnfDriveStatus defines the observe status of drives connected to this NNF Node +type NnfDriveStatus struct { + // Model is the manufacturer information about the device + Model string `json:"model,omitempty"` + + // The serial number for this storage controller. + SerialNumber string `json:"serialNumber,omitempty"` + + // The firmware version of this storage controller. + FirmwareVersion string `json:"firmwareVersion,omitempty"` + + // Physical slot location of the storage controller. + Slot string `json:"slot,omitempty"` + + // Capacity in bytes of the device. The full capacity may not + // be usable depending on what the storage driver can provide. 
+ Capacity int64 `json:"capacity,omitempty"` + + // WearLevel in percent for SSDs + WearLevel int64 `json:"wearLevel,omitempty"` -// NnfNode is the Schema for the nnfnodes API + NnfResourceStatus `json:",inline"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:storageversion +//+kubebuilder:printcolumn:name="STATE",type="string",JSONPath=".spec.state",description="Current desired state" +//+kubebuilder:printcolumn:name="HEALTH",type="string",JSONPath=".status.health",description="Health of node" +//+kubebuilder:printcolumn:name="STATUS",type="string",JSONPath=".status.status",description="Current status of node" +//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +//+kubebuilder:printcolumn:name="POD",type="string",JSONPath=".spec.pod",description="Parent pod name",priority=1 + +// NnfNode is the Schema for the NnfNode API type NnfNode struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -53,9 +114,13 @@ type NnfNode struct { Status NnfNodeStatus `json:"status,omitempty"` } -// +kubebuilder:object:root=true +func (n *NnfNode) GetStatus() updater.Status[*NnfNodeStatus] { + return &n.Status +} + +//+kubebuilder:object:root=true -// NnfNodeList contains a list of NnfNode +// NnfNodeList contains a list of NNF Nodes type NnfNodeList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` diff --git a/api/v1alpha3/nnfnode_webhook.go b/api/v1alpha4/nnfnode_webhook.go similarity index 98% rename from api/v1alpha3/nnfnode_webhook.go rename to api/v1alpha4/nnfnode_webhook.go index 3b046862..4f1b9fd3 100644 --- a/api/v1alpha3/nnfnode_webhook.go +++ b/api/v1alpha4/nnfnode_webhook.go @@ -17,7 +17,7 @@ * limitations under the License. 
*/ -package v1alpha3 +package v1alpha4 import ( ctrl "sigs.k8s.io/controller-runtime" diff --git a/api/v1alpha3/nnfnode_webhook_test.go b/api/v1alpha4/nnfnode_webhook_test.go similarity index 98% rename from api/v1alpha3/nnfnode_webhook_test.go rename to api/v1alpha4/nnfnode_webhook_test.go index 5b3c21f8..93ce23ba 100644 --- a/api/v1alpha3/nnfnode_webhook_test.go +++ b/api/v1alpha4/nnfnode_webhook_test.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha3 +package v1alpha4 import ( . "github.com/onsi/ginkgo/v2" diff --git a/api/v1alpha4/nnfnodeblockstorage_types.go b/api/v1alpha4/nnfnodeblockstorage_types.go index 13c80153..6e9eb137 100644 --- a/api/v1alpha4/nnfnodeblockstorage_types.go +++ b/api/v1alpha4/nnfnodeblockstorage_types.go @@ -1,5 +1,5 @@ /* - * Copyright 2024 Hewlett Packard Enterprise Development LP + * Copyright 2023-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -20,31 +20,87 @@ package v1alpha4 import ( + dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + "github.com/DataWorkflowServices/dws/utils/updater" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. +type NnfNodeBlockStorageAllocationSpec struct { + // Aggregate capacity of the block devices for each allocation + Capacity int64 `json:"capacity,omitempty"` -// NnfNodeBlockStorageSpec defines the desired state of NnfNodeBlockStorage + // List of nodes where /dev devices should be created + Access []string `json:"access,omitempty"` +} + +// NnfNodeBlockStorageSpec defines the desired storage attributes on a NNF Node. +// Storage spec are created on request of the user and fullfilled by the NNF Node Controller. 
type NnfNodeBlockStorageSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file + // SharedAllocation is used when a single NnfNodeBlockStorage allocation is used by multiple NnfNodeStorage allocations + SharedAllocation bool `json:"sharedAllocation"` - // Foo is an example field of NnfNodeBlockStorage. Edit nnfnodeblockstorage_types.go to remove/update - Foo string `json:"foo,omitempty"` + // Allocations is the list of storage allocations to make + Allocations []NnfNodeBlockStorageAllocationSpec `json:"allocations,omitempty"` } -// NnfNodeBlockStorageStatus defines the observed state of NnfNodeBlockStorage type NnfNodeBlockStorageStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file + // Allocations is the list of storage allocations that were made + Allocations []NnfNodeBlockStorageAllocationStatus `json:"allocations,omitempty"` + + dwsv1alpha2.ResourceError `json:",inline"` + + // PodStartTime is the value of pod.status.containerStatuses[].state.running.startedAt from the pod that did + // last successful full reconcile of the NnfNodeBlockStorage. This is used to tell whether the /dev paths + // listed in the status section are from the current boot of the node. + PodStartTime metav1.Time `json:"podStartTime,omitempty"` + + Ready bool `json:"ready"` +} + +type NnfNodeBlockStorageDeviceStatus struct { + // NQN of the base NVMe device + NQN string `json:"NQN"` + + // Id of the Namespace on the NVMe device (e.g., "2") + NamespaceId string `json:"namespaceId"` + + // Total capacity allocated for the storage. This may differ from the requested storage + // capacity as the system may round up to the requested capacity to satisify underlying + // storage requirements (i.e. block size / stripe size). 
+ CapacityAllocated int64 `json:"capacityAllocated,omitempty"` +} + +type NnfNodeBlockStorageAccessStatus struct { + // /dev paths for each of the block devices + DevicePaths []string `json:"devicePaths,omitempty"` + + // Redfish ID for the storage group + StorageGroupId string `json:"storageGroupId,omitempty"` +} + +type NnfNodeBlockStorageAllocationStatus struct { + // Accesses is a map of node name to the access status + Accesses map[string]NnfNodeBlockStorageAccessStatus `json:"accesses,omitempty"` + + // List of NVMe namespaces used by this allocation + Devices []NnfNodeBlockStorageDeviceStatus `json:"devices,omitempty"` + + // Total capacity allocated for the storage. This may differ from the requested storage + // capacity as the system may round up to the requested capacity to satisify underlying + // storage requirements (i.e. block size / stripe size). + CapacityAllocated int64 `json:"capacityAllocated,omitempty"` + + // Redfish ID for the storage pool + StoragePoolId string `json:"storagePoolId,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status - -// NnfNodeBlockStorage is the Schema for the nnfnodeblockstorages API +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.ready" +// +kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" type NnfNodeBlockStorage struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -53,15 +109,29 @@ type NnfNodeBlockStorage struct { Status NnfNodeBlockStorageStatus `json:"status,omitempty"` } +func (ns *NnfNodeBlockStorage) GetStatus() updater.Status[*NnfNodeBlockStorageStatus] { + return &ns.Status +} + // +kubebuilder:object:root=true -// NnfNodeBlockStorageList contains a list of NnfNodeBlockStorage +// NnfNodeBlockStorageList contains a list of NNF Nodes type NnfNodeBlockStorageList 
struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []NnfNodeBlockStorage `json:"items"` } +func (n *NnfNodeBlockStorageList) GetObjectList() []client.Object { + objectList := []client.Object{} + + for i := range n.Items { + objectList = append(objectList, &n.Items[i]) + } + + return objectList +} + func init() { SchemeBuilder.Register(&NnfNodeBlockStorage{}, &NnfNodeBlockStorageList{}) } diff --git a/api/v1alpha3/nnfnodeblockstorage_webhook.go b/api/v1alpha4/nnfnodeblockstorage_webhook.go similarity index 98% rename from api/v1alpha3/nnfnodeblockstorage_webhook.go rename to api/v1alpha4/nnfnodeblockstorage_webhook.go index a0930428..47817621 100644 --- a/api/v1alpha3/nnfnodeblockstorage_webhook.go +++ b/api/v1alpha4/nnfnodeblockstorage_webhook.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha3 +package v1alpha4 import ( ctrl "sigs.k8s.io/controller-runtime" diff --git a/api/v1alpha3/nnfnodeblockstorage_webhook_test.go b/api/v1alpha4/nnfnodeblockstorage_webhook_test.go similarity index 98% rename from api/v1alpha3/nnfnodeblockstorage_webhook_test.go rename to api/v1alpha4/nnfnodeblockstorage_webhook_test.go index 97081bee..a49c9844 100644 --- a/api/v1alpha3/nnfnodeblockstorage_webhook_test.go +++ b/api/v1alpha4/nnfnodeblockstorage_webhook_test.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha3 +package v1alpha4 import ( . "github.com/onsi/ginkgo/v2" diff --git a/api/v1alpha4/nnfnodeecdata_types.go b/api/v1alpha4/nnfnodeecdata_types.go index 0dc8c19e..87120b04 100644 --- a/api/v1alpha4/nnfnodeecdata_types.go +++ b/api/v1alpha4/nnfnodeecdata_types.go @@ -1,5 +1,5 @@ /* - * Copyright 2024 Hewlett Packard Enterprise Development LP + * Copyright 2022-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. 
* * The entirety of this work is licensed under the Apache License, @@ -30,19 +30,21 @@ import ( type NnfNodeECDataSpec struct { // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster // Important: Run "make" to regenerate code after modifying this file - - // Foo is an example field of NnfNodeECData. Edit nnfnodeecdata_types.go to remove/update - Foo string `json:"foo,omitempty"` } // NnfNodeECDataStatus defines the observed state of NnfNodeECData type NnfNodeECDataStatus struct { // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster // Important: Run "make" to regenerate code after modifying this file + + Data map[string]NnfNodeECPrivateData `json:"data,omitempty"` } -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status +type NnfNodeECPrivateData map[string]string + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:storageversion // NnfNodeECData is the Schema for the nnfnodeecdata API type NnfNodeECData struct { @@ -53,7 +55,7 @@ type NnfNodeECData struct { Status NnfNodeECDataStatus `json:"status,omitempty"` } -// +kubebuilder:object:root=true +//+kubebuilder:object:root=true // NnfNodeECDataList contains a list of NnfNodeECData type NnfNodeECDataList struct { diff --git a/api/v1alpha3/nnfnodeecdata_webhook.go b/api/v1alpha4/nnfnodeecdata_webhook.go similarity index 98% rename from api/v1alpha3/nnfnodeecdata_webhook.go rename to api/v1alpha4/nnfnodeecdata_webhook.go index 622c9e72..8e7e27c0 100644 --- a/api/v1alpha3/nnfnodeecdata_webhook.go +++ b/api/v1alpha4/nnfnodeecdata_webhook.go @@ -17,7 +17,7 @@ * limitations under the License. 
*/ -package v1alpha3 +package v1alpha4 import ( ctrl "sigs.k8s.io/controller-runtime" diff --git a/api/v1alpha3/nnfnodeecdata_webhook_test.go b/api/v1alpha4/nnfnodeecdata_webhook_test.go similarity index 98% rename from api/v1alpha3/nnfnodeecdata_webhook_test.go rename to api/v1alpha4/nnfnodeecdata_webhook_test.go index 572d7833..dc62e0d2 100644 --- a/api/v1alpha3/nnfnodeecdata_webhook_test.go +++ b/api/v1alpha4/nnfnodeecdata_webhook_test.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha3 +package v1alpha4 import ( . "github.com/onsi/ginkgo/v2" diff --git a/api/v1alpha4/nnfnodestorage_types.go b/api/v1alpha4/nnfnodestorage_types.go index 28fa0856..6b6b014b 100644 --- a/api/v1alpha4/nnfnodestorage_types.go +++ b/api/v1alpha4/nnfnodestorage_types.go @@ -1,5 +1,5 @@ /* - * Copyright 2024 Hewlett Packard Enterprise Development LP + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -20,31 +20,104 @@ package v1alpha4 import ( + dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + "github.com/DataWorkflowServices/dws/utils/updater" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// IMPORTANT: Run "make" to regenerate code after modifying this file // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. -// NnfNodeStorageSpec defines the desired state of NnfNodeStorage +// NnfNodeStorageSpec defines the desired storage attributes on a NNF Node. +// Storage spec are created on bequest of the user and fullfilled by the NNF Node Controller. 
type NnfNodeStorageSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file + // Count is the number of allocations to make on this node. All of the allocations will + // be created with the same parameters + // +kubebuilder:validation:Minimum:=0 + Count int `json:"count"` - // Foo is an example field of NnfNodeStorage. Edit nnfnodestorage_types.go to remove/update - Foo string `json:"foo,omitempty"` + // SharedAllocation is used when a single NnfNodeBlockStorage allocation is used by multiple NnfNodeStorage allocations + SharedAllocation bool `json:"sharedAllocation"` + + // Capacity of an individual allocation + Capacity int64 `json:"capacity,omitempty"` + + // User ID for file system + UserID uint32 `json:"userID"` + + // Group ID for file system + GroupID uint32 `json:"groupID"` + + // FileSystemType defines the type of the desired filesystem, or raw + // block device. + // +kubebuilder:validation:Enum=raw;lvm;zfs;xfs;gfs2;lustre + // +kubebuilder:default:=raw + FileSystemType string `json:"fileSystemType,omitempty"` + + // LustreStorageSpec describes the Lustre target created here, if + // FileSystemType specifies a Lustre target. + LustreStorage LustreStorageSpec `json:"lustreStorage,omitempty"` + + // BlockReference is an object reference to an NnfNodeBlockStorage + BlockReference corev1.ObjectReference `json:"blockReference,omitempty"` } -// NnfNodeStorageStatus defines the observed state of NnfNodeStorage +// LustreStorageSpec describes the Lustre target to be created here. +type LustreStorageSpec struct { + // FileSystemName is the fsname parameter for the Lustre filesystem. + // +kubebuilder:validation:MaxLength:=8 + FileSystemName string `json:"fileSystemName,omitempty"` + + // TargetType is the type of Lustre target to be created. 
+ // +kubebuilder:validation:Enum=mgt;mdt;mgtmdt;ost + TargetType string `json:"targetType,omitempty"` + + // StartIndex is used to order a series of MDTs or OSTs. This is used only + // when creating MDT and OST targets. If count in the NnfNodeStorageSpec is more + // than 1, then StartIndex is the index of the first allocation, and the indexes + // increment from there. + // +kubebuilder:validation:Minimum:=0 + StartIndex int `json:"startIndex,omitempty"` + + // MgsAddress is the NID of the MGS to use. This is used only when + // creating MDT and OST targets. + MgsAddress string `json:"mgsAddress,omitempty"` + + // BackFs is the type of backing filesystem to use. + // +kubebuilder:validation:Enum=ldiskfs;zfs + BackFs string `json:"backFs,omitempty"` +} + +// NnfNodeStorageStatus defines the status for NnfNodeStorage type NnfNodeStorageStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file + // Allocations is the list of storage allocations that were made + Allocations []NnfNodeStorageAllocationStatus `json:"allocations,omitempty"` + + Ready bool `json:"ready,omitempty"` + + dwsv1alpha2.ResourceError `json:",inline"` +} + +// NnfNodeStorageAllocationStatus defines the allocation status for each allocation in the NnfNodeStorage +type NnfNodeStorageAllocationStatus struct { + // Name of the LVM VG + VolumeGroup string `json:"volumeGroup,omitempty"` + + // Name of the LVM LV + LogicalVolume string `json:"logicalVolume,omitempty"` + + Ready bool `json:"ready,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status - -// NnfNodeStorage is the Schema for the nnfnodestorages API +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.ready" +// +kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" +// 
+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// NnfNodeStorage is the Schema for the NnfNodeStorage API type NnfNodeStorage struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -53,15 +126,29 @@ type NnfNodeStorage struct { Status NnfNodeStorageStatus `json:"status,omitempty"` } -// +kubebuilder:object:root=true +func (ns *NnfNodeStorage) GetStatus() updater.Status[*NnfNodeStorageStatus] { + return &ns.Status +} + +//+kubebuilder:object:root=true -// NnfNodeStorageList contains a list of NnfNodeStorage +// NnfNodeStorageList contains a list of NNF Nodes type NnfNodeStorageList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []NnfNodeStorage `json:"items"` } +func (n *NnfNodeStorageList) GetObjectList() []client.Object { + objectList := []client.Object{} + + for i := range n.Items { + objectList = append(objectList, &n.Items[i]) + } + + return objectList +} + func init() { SchemeBuilder.Register(&NnfNodeStorage{}, &NnfNodeStorageList{}) } diff --git a/api/v1alpha3/nnfnodestorage_webhook.go b/api/v1alpha4/nnfnodestorage_webhook.go similarity index 98% rename from api/v1alpha3/nnfnodestorage_webhook.go rename to api/v1alpha4/nnfnodestorage_webhook.go index 2cca6b0a..5b7977b4 100644 --- a/api/v1alpha3/nnfnodestorage_webhook.go +++ b/api/v1alpha4/nnfnodestorage_webhook.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha3 +package v1alpha4 import ( ctrl "sigs.k8s.io/controller-runtime" diff --git a/api/v1alpha3/nnfnodestorage_webhook_test.go b/api/v1alpha4/nnfnodestorage_webhook_test.go similarity index 98% rename from api/v1alpha3/nnfnodestorage_webhook_test.go rename to api/v1alpha4/nnfnodestorage_webhook_test.go index ed03b8e4..6c9fe444 100644 --- a/api/v1alpha3/nnfnodestorage_webhook_test.go +++ b/api/v1alpha4/nnfnodestorage_webhook_test.go @@ -17,7 +17,7 @@ * limitations under the License. 
*/ -package v1alpha3 +package v1alpha4 import ( . "github.com/onsi/ginkgo/v2" diff --git a/api/v1alpha4/nnfportmanager_types.go b/api/v1alpha4/nnfportmanager_types.go index 6f00db0e..3732d491 100644 --- a/api/v1alpha4/nnfportmanager_types.go +++ b/api/v1alpha4/nnfportmanager_types.go @@ -1,5 +1,5 @@ /* - * Copyright 2024 Hewlett Packard Enterprise Development LP + * Copyright 2023-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -20,29 +20,100 @@ package v1alpha4 import ( + "github.com/DataWorkflowServices/dws/utils/updater" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. +// NnfPortManagerAllocationSpec defines the desired state for a single port allocation +type NnfPortManagerAllocationSpec struct { + // Requester is an object reference to the requester of a ports. + Requester corev1.ObjectReference `json:"requester"` + + // Count is the number of desired ports the requester needs. The port manager + // will attempt to allocate this many ports. + // +kubebuilder:default:=1 + Count int `json:"count"` +} + // NnfPortManagerSpec defines the desired state of NnfPortManager type NnfPortManagerSpec struct { // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster // Important: Run "make" to regenerate code after modifying this file - // Foo is an example field of NnfPortManager. Edit nnfportmanager_types.go to remove/update - Foo string `json:"foo,omitempty"` + // SystemConfiguration is an object reference to the system configuration. The + // Port Manager will use the available ports defined in the system configuration. 
+ SystemConfiguration corev1.ObjectReference `json:"systemConfiguration"` + + // Allocations is a list of allocation requests that the Port Manager will attempt + // to satisfy. To request port resources from the port manager, clients should add + // an entry to the allocations. Entries must be unique. The port manager controller + // will attempt to allocate port resources for each allocation specification in the + // list. To remove an allocation and free up port resources, remove the allocation + // from the list. + Allocations []NnfPortManagerAllocationSpec `json:"allocations"` } +// AllocationStatus is the current status of a port requestor. A port that is in use by the respective owner +// will have a status of "InUse". A port that is freed by the owner but not yet reclaimed by the port manager +// will have a status of "Free". Any other status value indicates a failure of the port allocation. +// +kubebuilder:validation:Enum:=InUse;Free;Cooldown;InvalidConfiguration;InsufficientResources +type NnfPortManagerAllocationStatusStatus string + +const ( + NnfPortManagerAllocationStatusInUse NnfPortManagerAllocationStatusStatus = "InUse" + NnfPortManagerAllocationStatusFree NnfPortManagerAllocationStatusStatus = "Free" + NnfPortManagerAllocationStatusCooldown NnfPortManagerAllocationStatusStatus = "Cooldown" + NnfPortManagerAllocationStatusInvalidConfiguration NnfPortManagerAllocationStatusStatus = "InvalidConfiguration" + NnfPortManagerAllocationStatusInsufficientResources NnfPortManagerAllocationStatusStatus = "InsufficientResources" + // NOTE: You must ensure any new value is added to the above kubebuilder validation enum +) + +// NnfPortManagerAllocationStatus defines the allocation status of a port for a given requester. +type NnfPortManagerAllocationStatus struct { + // Requester is an object reference to the requester of the port resource, if one exists, or + // empty otherwise. 
+ Requester *corev1.ObjectReference `json:"requester,omitempty"` + + // Ports is list of ports allocated to the owning resource. + Ports []uint16 `json:"ports,omitempty"` + + // Status is the ownership status of the port. + Status NnfPortManagerAllocationStatusStatus `json:"status"` + + // TimeUnallocated is when the port was unallocated. This is to ensure the proper cooldown + // duration. + TimeUnallocated *metav1.Time `json:"timeUnallocated,omitempty"` +} + +// PortManagerStatus is the current status of the port manager. +// +kubebuilder:validation:Enum:=Ready;SystemConfigurationNotFound +type NnfPortManagerStatusStatus string + +const ( + NnfPortManagerStatusReady NnfPortManagerStatusStatus = "Ready" + NnfPortManagerStatusSystemConfigurationNotFound NnfPortManagerStatusStatus = "SystemConfigurationNotFound" + // NOTE: You must ensure any new value is added in the above kubebuilder validation enum +) + // NnfPortManagerStatus defines the observed state of NnfPortManager type NnfPortManagerStatus struct { // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster // Important: Run "make" to regenerate code after modifying this file + + // Allocations is a list of port allocation status'. + Allocations []NnfPortManagerAllocationStatus `json:"allocations,omitempty"` + + // Status is the current status of the port manager. 
+ Status NnfPortManagerStatusStatus `json:"status"` } -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:storageversion // NnfPortManager is the Schema for the nnfportmanagers API type NnfPortManager struct { @@ -53,7 +124,11 @@ type NnfPortManager struct { Status NnfPortManagerStatus `json:"status,omitempty"` } -// +kubebuilder:object:root=true +func (mgr *NnfPortManager) GetStatus() updater.Status[*NnfPortManagerStatus] { + return &mgr.Status +} + +//+kubebuilder:object:root=true // NnfPortManagerList contains a list of NnfPortManager type NnfPortManagerList struct { diff --git a/api/v1alpha3/nnfportmanager_webhook.go b/api/v1alpha4/nnfportmanager_webhook.go similarity index 98% rename from api/v1alpha3/nnfportmanager_webhook.go rename to api/v1alpha4/nnfportmanager_webhook.go index 250bcd17..67d3343e 100644 --- a/api/v1alpha3/nnfportmanager_webhook.go +++ b/api/v1alpha4/nnfportmanager_webhook.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha3 +package v1alpha4 import ( ctrl "sigs.k8s.io/controller-runtime" diff --git a/api/v1alpha3/nnfportmanager_webhook_test.go b/api/v1alpha4/nnfportmanager_webhook_test.go similarity index 98% rename from api/v1alpha3/nnfportmanager_webhook_test.go rename to api/v1alpha4/nnfportmanager_webhook_test.go index 9727e4f0..9aaf85b8 100644 --- a/api/v1alpha3/nnfportmanager_webhook_test.go +++ b/api/v1alpha4/nnfportmanager_webhook_test.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha3 +package v1alpha4 import ( . 
"github.com/onsi/ginkgo/v2" diff --git a/api/v1alpha4/nnfstorage_types.go b/api/v1alpha4/nnfstorage_types.go index e4be22b3..f2577b24 100644 --- a/api/v1alpha4/nnfstorage_types.go +++ b/api/v1alpha4/nnfstorage_types.go @@ -1,5 +1,5 @@ /* - * Copyright 2024 Hewlett Packard Enterprise Development LP + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -20,31 +20,133 @@ package v1alpha4 import ( + dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + "github.com/DataWorkflowServices/dws/utils/updater" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. +const ( + AllocationSetLabel = "nnf.cray.hpe.com/allocationset" +) + +// NnfStorageAllocationNodes identifies the node and properties of the allocation to make on that node +type NnfStorageAllocationNodes struct { + // Name of the node to make the allocation on + Name string `json:"name"` + + // Number of allocations to make on this node + Count int `json:"count"` +} + +// NnfStorageLustreSpec defines the specifications for a Lustre filesystem +type NnfStorageLustreSpec struct { + // TargetType is the type of Lustre target to be created. + // +kubebuilder:validation:Enum=mgt;mdt;mgtmdt;ost + TargetType string `json:"targetType,omitempty"` + + // BackFs is the type of backing filesystem to use. 
+ // +kubebuilder:validation:Enum=ldiskfs;zfs + BackFs string `json:"backFs,omitempty"` + + // MgsAddress is the NID of the MGS when a pre-existing MGS is + // provided in the NnfStorageProfile + MgsAddress string `json:"mgsAddress,omitempty"` + + // PersistentMgsReference is a reference to a persistent storage that is providing + // the external MGS. + PersistentMgsReference corev1.ObjectReference `json:"persistentMgsReference,omitempty"` +} -// NnfStorageSpec defines the desired state of NnfStorage +// NnfStorageAllocationSetSpec defines the details for an allocation set +type NnfStorageAllocationSetSpec struct { + // Name is a human readable label for this set of allocations (e.g., xfs) + Name string `json:"name"` + + // Capacity defines the capacity, in bytes, of this storage specification. The NNF Node itself + // may split the storage among the available drives operating in the NNF Node. + Capacity int64 `json:"capacity"` + + // Lustre specific configuration + NnfStorageLustreSpec `json:",inline"` + + // SharedAllocation shares a single block storage allocation between multiple file system allocations + // (within the same workflow) on a Rabbit + SharedAllocation bool `json:"sharedAllocation"` + + // Nodes is the list of Rabbit nodes to make allocations on + Nodes []NnfStorageAllocationNodes `json:"nodes"` +} + +// NnfStorageSpec defines the specification for requesting generic storage on a set +// of available NNF Nodes. This object is related to a #DW for NNF Storage, with the WLM +// making the determination for which NNF Nodes it wants to utilize. type NnfStorageSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file - // Foo is an example field of NnfStorage. Edit nnfstorage_types.go to remove/update - Foo string `json:"foo,omitempty"` + // FileSystemType defines the type of the desired filesystem, or raw + // block device. 
+ // +kubebuilder:validation:Enum=raw;lvm;zfs;xfs;gfs2;lustre + // +kubebuilder:default:=raw + FileSystemType string `json:"fileSystemType,omitempty"` + + // User ID for file system + UserID uint32 `json:"userID"` + + // Group ID for file system + GroupID uint32 `json:"groupID"` + + // AllocationSets is a list of different types of storage allocations to make. Each + // AllocationSet describes an entire allocation spanning multiple Rabbits. For example, + // an AllocationSet could be all of the OSTs in a Lustre filesystem, or all of the raw + // block devices in a raw block configuration. + AllocationSets []NnfStorageAllocationSetSpec `json:"allocationSets"` } -// NnfStorageStatus defines the observed state of NnfStorage +// NnfStorageAllocationSetStatus contains the status information for an allocation set +type NnfStorageAllocationSetStatus struct { + Ready bool `json:"ready,omitempty"` + + // AllocationCount is the total number of allocations that currently + // exist + AllocationCount int `json:"allocationCount"` +} + +type NnfStorageLustreStatus struct { + // MgsAddress is the NID of the MGS. + MgsAddress string `json:"mgsAddress,omitempty"` + + // FileSystemName is the fsname parameter for the Lustre filesystem. + // +kubebuilder:validation:MaxLength:=8 + FileSystemName string `json:"fileSystemName,omitempty"` + + // LustreMgtReference is an object reference to the NnfLustreMGT resource used + // by the NnfStorage + LustreMgtReference corev1.ObjectReference `json:"lustreMgtReference,omitempty"` +} + +// NnfStorageStatus defines the observed status of NNF Storage. type NnfStorageStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file + NnfStorageLustreStatus `json:",inline"` + + // AllocationSets holds the status information for each of the AllocationSets + // from the spec.
+ AllocationSets []NnfStorageAllocationSetStatus `json:"allocationSets,omitempty"` + + dwsv1alpha2.ResourceError `json:",inline"` + + // Ready reflects the status of this NNF Storage + Ready bool `json:"ready,omitempty"` } -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:storageversion +//+kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.ready" +//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +//+kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" -// NnfStorage is the Schema for the nnfstorages API +// NnfStorage is the Schema for the storages API type NnfStorage struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -53,15 +155,29 @@ type NnfStorage struct { Status NnfStorageStatus `json:"status,omitempty"` } -// +kubebuilder:object:root=true +func (s *NnfStorage) GetStatus() updater.Status[*NnfStorageStatus] { + return &s.Status +} + +//+kubebuilder:object:root=true -// NnfStorageList contains a list of NnfStorage +// NnfStorageList contains a list of Storage type NnfStorageList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []NnfStorage `json:"items"` } +func (n *NnfStorageList) GetObjectList() []client.Object { + objectList := []client.Object{} + + for i := range n.Items { + objectList = append(objectList, &n.Items[i]) + } + + return objectList +} + func init() { SchemeBuilder.Register(&NnfStorage{}, &NnfStorageList{}) } diff --git a/api/v1alpha3/nnfstorage_webhook.go b/api/v1alpha4/nnfstorage_webhook.go similarity index 98% rename from api/v1alpha3/nnfstorage_webhook.go rename to api/v1alpha4/nnfstorage_webhook.go index 8ee50014..715a943e 100644 --- a/api/v1alpha3/nnfstorage_webhook.go +++ b/api/v1alpha4/nnfstorage_webhook.go @@ -17,7 +17,7 @@ * limitations under the License. 
*/ -package v1alpha3 +package v1alpha4 import ( ctrl "sigs.k8s.io/controller-runtime" diff --git a/api/v1alpha3/nnfstorage_webhook_test.go b/api/v1alpha4/nnfstorage_webhook_test.go similarity index 98% rename from api/v1alpha3/nnfstorage_webhook_test.go rename to api/v1alpha4/nnfstorage_webhook_test.go index a53cd43c..b05cc05b 100644 --- a/api/v1alpha3/nnfstorage_webhook_test.go +++ b/api/v1alpha4/nnfstorage_webhook_test.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha3 +package v1alpha4 import ( . "github.com/onsi/ginkgo/v2" diff --git a/api/v1alpha4/nnfstorageprofile_types.go b/api/v1alpha4/nnfstorageprofile_types.go index 318a115f..6adc8b05 100644 --- a/api/v1alpha4/nnfstorageprofile_types.go +++ b/api/v1alpha4/nnfstorageprofile_types.go @@ -1,5 +1,5 @@ /* - * Copyright 2024 Hewlett Packard Enterprise Development LP + * Copyright 2022-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -21,39 +21,274 @@ package v1alpha4 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. +// NnfStorageProfileLustreCmdLines defines commandlines to use for mkfs, zpool, and other utilities +// for Lustre allocations. +type NnfStorageProfileLustreCmdLines struct { + // ZpoolCreate specifies the zpool create commandline, minus the "zpool create". + // This is where you may specify zpool create options, and the virtual device (vdev) such as + // "mirror", or "draid". See zpoolconcepts(7). 
+ ZpoolCreate string `json:"zpoolCreate,omitempty"` -// NnfStorageProfileSpec defines the desired state of NnfStorageProfile -type NnfStorageProfileSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file + // Mkfs specifies the mkfs.lustre commandline, minus the "mkfs.lustre". + // Use the --mkfsoptions argument to specify the zfs create options. See zfsprops(7). + // Use the --mountfsoptions argument to specify persistent mount options for the lustre targets. + Mkfs string `json:"mkfs,omitempty"` - // Foo is an example field of NnfStorageProfile. Edit nnfstorageprofile_types.go to remove/update - Foo string `json:"foo,omitempty"` + // MountTarget specifies the mount command line for the lustre target. + // For persistent mount options for lustre targets, do not use this array; use the --mountfsoptions + // argument to mkfs.lustre instead. + MountTarget string `json:"mountTarget,omitempty"` + + // PostActivate specifies a list of commands to run on the Rabbit after the + // Lustre target has been activated + PostActivate []string `json:"postActivate,omitempty"` + + // PreDeactivate specifies a list of commands to run on the Rabbit before the + // Lustre target is deactivated + PreDeactivate []string `json:"preDeactivate,omitempty"` +} + +// NnfStorageProfileLustreMiscOptions defines options to use for the mount library, and other utilities. 
+type NnfStorageProfileLustreMiscOptions struct { + // ColocateComputes indicates that the Lustre target should be placed on a Rabbit node that has a physical connection + // to the compute nodes in a workflow + // +kubebuilder:default:=false + ColocateComputes bool `json:"colocateComputes"` + + // Count specifies how many Lustre targets to create + // +kubebuilder:validation:Minimum:=1 + Count int `json:"count,omitempty"` + + // Scale provides a unitless value to determine how many Lustre targets to create + // +kubebuilder:validation:Minimum:=1 + // +kubebuilder:validation:Maximum:=10 + Scale int `json:"scale,omitempty"` + + // Storagelabels defines a list of labels that are added to the DirectiveBreakdown + // labels constraint. This restricts allocations to Storage resources with these labels + StorageLabels []string `json:"storageLabels,omitempty"` +} + +// NnfStorageProfileLustreData defines the Lustre-specific configuration +type NnfStorageProfileLustreData struct { + // CombinedMGTMDT indicates whether the MGT and MDT should be created on the same target device + // +kubebuilder:default:=false + CombinedMGTMDT bool `json:"combinedMgtMdt,omitempty"` + + // ExternalMGS specifies the use of an existing MGS rather than creating one. This can + // be either the NID(s) of a pre-existing MGS that should be used, or it can be an NNF Persistent + // Instance that was created with the "StandaloneMGTPoolName" option. In the latter case, the format + // is "pool:poolName" where "poolName" is the argument from "StandaloneMGTPoolName". A single MGS will + // be picked from the pool. + ExternalMGS string `json:"externalMgs,omitempty"` + + // CapacityMGT specifies the size of the MGT device. + // +kubebuilder:validation:Pattern:="^\\d+(KiB|KB|MiB|MB|GiB|GB|TiB|TB)$" + // +kubebuilder:default:="5GiB" + CapacityMGT string `json:"capacityMgt,omitempty"` + + // CapacityMDT specifies the size of the MDT device. This is also + // used for a combined MGT+MDT device. 
+ // +kubebuilder:validation:Pattern:="^\\d+(KiB|KB|MiB|MB|GiB|GB|TiB|TB)$" + // +kubebuilder:default:="5GiB" + CapacityMDT string `json:"capacityMdt,omitempty"` + + // ExclusiveMDT indicates that the MDT should not be colocated with any other target on the chosen server. + // +kubebuilder:default:=false + ExclusiveMDT bool `json:"exclusiveMdt,omitempty"` + + // CapacityScalingFactor is a scaling factor for the OST capacity requested in the DirectiveBreakdown + // +kubebuilder:default:="1.0" + CapacityScalingFactor string `json:"capacityScalingFactor,omitempty"` + + // StandaloneMGTPoolName creates a Lustre MGT without a MDT or OST. This option can only be used when creating + // a persistent Lustre instance. The MGS is placed into a named pool that can be used by the "ExternalMGS" option. + // Multiple pools can be created. + StandaloneMGTPoolName string `json:"standaloneMgtPoolName,omitempty"` + + // MgtCmdLines contains commands to create an MGT target. + MgtCmdLines NnfStorageProfileLustreCmdLines `json:"mgtCommandlines,omitempty"` + + // MdtCmdLines contains commands to create an MDT target. + MdtCmdLines NnfStorageProfileLustreCmdLines `json:"mdtCommandlines,omitempty"` + + // MgtMdtCmdLines contains commands to create a combined MGT/MDT target. + MgtMdtCmdLines NnfStorageProfileLustreCmdLines `json:"mgtMdtCommandlines,omitempty"` + + // OstCmdLines contains commands to create an OST target. + OstCmdLines NnfStorageProfileLustreCmdLines `json:"ostCommandlines,omitempty"` + + // MgtOptions contains options to use for libraries used for an MGT target. + MgtOptions NnfStorageProfileLustreMiscOptions `json:"mgtOptions,omitempty"` + + // MdtOptions contains options to use for libraries used for an MDT target. + MdtOptions NnfStorageProfileLustreMiscOptions `json:"mdtOptions,omitempty"` + + // MgtMdtOptions contains options to use for libraries used for a combined MGT/MDT target. 
+ MgtMdtOptions NnfStorageProfileLustreMiscOptions `json:"mgtMdtOptions,omitempty"` + + // OstOptions contains options to use for libraries used for an OST target. + OstOptions NnfStorageProfileLustreMiscOptions `json:"ostOptions,omitempty"` + + // MountRabbit specifies mount options for making the Lustre client mount on the Rabbit. + MountRabbit string `json:"mountRabbit,omitempty"` + + // MountCompute specifies mount options for making the Lustre client mount on the Compute. + MountCompute string `json:"mountCompute,omitempty"` +} + +// NnfStorageProfileCmdLines defines commandlines to use for mkfs, and other utilities for storage +// allocations that use LVM and a simple file system type (e.g., gfs2) +type NnfStorageProfileCmdLines struct { + // Mkfs specifies the mkfs commandline, minus the "mkfs". + Mkfs string `json:"mkfs,omitempty"` + + // SharedVg specifies that allocations from a workflow on the same Rabbit should share an + // LVM VolumeGroup + // +kubebuilder:default:=false + SharedVg bool `json:"sharedVg,omitempty"` + + // PvCreate specifies the pvcreate commandline, minus the "pvcreate". + PvCreate string `json:"pvCreate,omitempty"` + + // PvRemove specifies the pvremove commandline, minus the "pvremove". + PvRemove string `json:"pvRemove,omitempty"` + + // VgCreate specifies the vgcreate commandline, minus the "vgcreate". + VgCreate string `json:"vgCreate,omitempty"` + + // VgChange specifies the various vgchange commandlines, minus the "vgchange" + VgChange NnfStorageProfileLVMVgChangeCmdLines `json:"vgChange,omitempty"` + + // VgRemove specifies the vgremove commandline, minus the "vgremove". + VgRemove string `json:"vgRemove,omitempty"` + + // LvCreate specifies the lvcreate commandline, minus the "lvcreate".
+ LvCreate string `json:"lvCreate,omitempty"` + + // LvChange specifies the various lvchange commandlines, minus the "lvchange" + LvChange NnfStorageProfileLVMLvChangeCmdLines `json:"lvChange,omitempty"` + + // LvRemove specifies the lvremove commandline, minus the "lvremove". + LvRemove string `json:"lvRemove,omitempty"` + + // MountRabbit specifies mount options for mounting on the Rabbit. + MountRabbit string `json:"mountRabbit,omitempty"` + + // PostActivate specifies a list of commands to run on the Rabbit after the + // file system has been activated + PostActivate []string `json:"postActivate,omitempty"` + + // MountCompute specifies mount options for mounting on the Compute. + MountCompute string `json:"mountCompute,omitempty"` + + // PreDeactivate specifies a list of commands to run on the Rabbit before the + // file system is deactivated + PreDeactivate []string `json:"preDeactivate,omitempty"` +} + +// NnfStorageProfileLVMVgChangeCmdLines +type NnfStorageProfileLVMVgChangeCmdLines struct { + // The vgchange commandline for lockStart, minus the "vgchange" command + LockStart string `json:"lockStart,omitempty"` + + // The vgchange commandline for lockStop, minus the "vgchange" command + LockStop string `json:"lockStop,omitempty"` +} + +// NnfStorageProfileLVMLvChangeCmdLines +type NnfStorageProfileLVMLvChangeCmdLines struct { + // The lvchange commandline for activate, minus the "lvchange" command + Activate string `json:"activate,omitempty"` + + // The lvchange commandline for deactivate, minus the "lvchange" command + Deactivate string `json:"deactivate,omitempty"` } -// NnfStorageProfileStatus defines the observed state of NnfStorageProfile -type NnfStorageProfileStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file +// NnfStorageProfileGFS2Data defines the GFS2-specific configuration +type NnfStorageProfileGFS2Data struct { + // CmdLines contains
commands to create volumes and filesystems. + CmdLines NnfStorageProfileCmdLines `json:"commandlines,omitempty"` + + // Storagelabels defines a list of labels that are added to the DirectiveBreakdown + // labels constraint. This restricts allocations to Storage resources with these labels + StorageLabels []string `json:"storageLabels,omitempty"` + + // CapacityScalingFactor is a scaling factor for the capacity requested in the DirectiveBreakdown + // +kubebuilder:default:="1.0" + CapacityScalingFactor string `json:"capacityScalingFactor,omitempty"` } -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status +// NnfStorageProfileXFSData defines the XFS-specific configuration +type NnfStorageProfileXFSData struct { + // CmdLines contains commands to create volumes and filesystems. + CmdLines NnfStorageProfileCmdLines `json:"commandlines,omitempty"` + + // Storagelabels defines a list of labels that are added to the DirectiveBreakdown + // labels constraint. This restricts allocations to Storage resources with these labels + StorageLabels []string `json:"storageLabels,omitempty"` + + // CapacityScalingFactor is a scaling factor for the capacity requested in the DirectiveBreakdown + // +kubebuilder:default:="1.0" + CapacityScalingFactor string `json:"capacityScalingFactor,omitempty"` +} + +// NnfStorageProfileRawData defines the Raw-specific configuration +type NnfStorageProfileRawData struct { + // CmdLines contains commands to create volumes and filesystems. + CmdLines NnfStorageProfileCmdLines `json:"commandlines,omitempty"` + + // Storagelabels defines a list of labels that are added to the DirectiveBreakdown + // labels constraint. 
This restricts allocations to Storage resources with these labels + StorageLabels []string `json:"storageLabels,omitempty"` + + // CapacityScalingFactor is a scaling factor for the capacity requested in the DirectiveBreakdown + // +kubebuilder:default:="1.0" + CapacityScalingFactor string `json:"capacityScalingFactor,omitempty"` +} + +// NnfStorageProfileData defines the desired state of NnfStorageProfile +type NnfStorageProfileData struct { + + // Default is true if this instance is the default resource to use + // +kubebuilder:default:=false + Default bool `json:"default,omitempty"` + + // Pinned is true if this instance is an immutable copy + // +kubebuilder:default:=false + Pinned bool `json:"pinned,omitempty"` + + // LustreStorage defines the Lustre-specific configuration + LustreStorage NnfStorageProfileLustreData `json:"lustreStorage,omitempty"` + + // GFS2Storage defines the GFS2-specific configuration + GFS2Storage NnfStorageProfileGFS2Data `json:"gfs2Storage,omitempty"` + + // XFSStorage defines the XFS-specific configuration + XFSStorage NnfStorageProfileXFSData `json:"xfsStorage,omitempty"` + + // RawStorage defines the Raw-specific configuration + RawStorage NnfStorageProfileRawData `json:"rawStorage,omitempty"` +} + +//+kubebuilder:object:root=true +// +kubebuilder:storageversion +//+kubebuilder:printcolumn:name="DEFAULT",type="boolean",JSONPath=".data.default",description="True if this is the default instance" +//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" // NnfStorageProfile is the Schema for the nnfstorageprofiles API type NnfStorageProfile struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec NnfStorageProfileSpec `json:"spec,omitempty"` - Status NnfStorageProfileStatus `json:"status,omitempty"` + Data NnfStorageProfileData `json:"data,omitempty"` } -// +kubebuilder:object:root=true +//+kubebuilder:object:root=true +// +kubebuilder:storageversion // 
NnfStorageProfileList contains a list of NnfStorageProfile type NnfStorageProfileList struct { @@ -62,6 +297,31 @@ type NnfStorageProfileList struct { Items []NnfStorageProfile `json:"items"` } +func (n *NnfStorageProfile) GetLustreMiscOptions(target string) NnfStorageProfileLustreMiscOptions { + switch target { + case "mgt": + return n.Data.LustreStorage.MgtOptions + case "mdt": + return n.Data.LustreStorage.MdtOptions + case "mgtmdt": + return n.Data.LustreStorage.MgtMdtOptions + case "ost": + return n.Data.LustreStorage.OstOptions + default: + panic("Invalid target type") + } +} + +func (n *NnfStorageProfileList) GetObjectList() []client.Object { + objectList := []client.Object{} + + for i := range n.Items { + objectList = append(objectList, &n.Items[i]) + } + + return objectList +} + func init() { SchemeBuilder.Register(&NnfStorageProfile{}, &NnfStorageProfileList{}) } diff --git a/api/v1alpha3/nnfstorageprofile_webhook.go b/api/v1alpha4/nnfstorageprofile_webhook.go similarity index 99% rename from api/v1alpha3/nnfstorageprofile_webhook.go rename to api/v1alpha4/nnfstorageprofile_webhook.go index 623b1e67..8516f16a 100644 --- a/api/v1alpha3/nnfstorageprofile_webhook.go +++ b/api/v1alpha4/nnfstorageprofile_webhook.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha3 +package v1alpha4 import ( "fmt" diff --git a/api/v1alpha3/nnfstorageprofile_webhook_test.go b/api/v1alpha4/nnfstorageprofile_webhook_test.go similarity index 99% rename from api/v1alpha3/nnfstorageprofile_webhook_test.go rename to api/v1alpha4/nnfstorageprofile_webhook_test.go index e3c81df9..c3dca2c4 100644 --- a/api/v1alpha3/nnfstorageprofile_webhook_test.go +++ b/api/v1alpha4/nnfstorageprofile_webhook_test.go @@ -17,7 +17,7 @@ * limitations under the License. 
*/ -package v1alpha3 +package v1alpha4 import ( "context" diff --git a/api/v1alpha4/nnfsystemstorage_types.go b/api/v1alpha4/nnfsystemstorage_types.go index 05e9fe35..7ee4ed9a 100644 --- a/api/v1alpha4/nnfsystemstorage_types.go +++ b/api/v1alpha4/nnfsystemstorage_types.go @@ -20,30 +20,92 @@ package v1alpha4 import ( + dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + "github.com/DataWorkflowServices/dws/utils/updater" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. +type NnfSystemStorageComputesTarget string + +const ( + ComputesTargetAll NnfSystemStorageComputesTarget = "all" + ComputesTargetEven NnfSystemStorageComputesTarget = "even" + ComputesTargetOdd NnfSystemStorageComputesTarget = "odd" + ComputesTargetPattern NnfSystemStorageComputesTarget = "pattern" +) // NnfSystemStorageSpec defines the desired state of NnfSystemStorage type NnfSystemStorageSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file + // SystemConfiguration is an object reference to the SystemConfiguration resource to use. If this + // field is empty, name: default namespace: default is used. 
+ SystemConfiguration corev1.ObjectReference `json:"systemConfiguration,omitempty"` + + // ExcludeRabbits is a list of Rabbits to exclude from the Rabbits in the SystemConfiguration + ExcludeRabbits []string `json:"excludeRabbits,omitempty"` + + // IncludeRabbits is a list of Rabbits to use rather than getting the list of Rabbits from the + // SystemConfiguration + IncludeRabbits []string `json:"includeRabbits,omitempty"` + + // ExcludeDisabledRabbits looks at the Storage resource for a Rabbit and does not use it if it's + // marked as "disabled" + // +kubebuilder:default:=false + ExcludeDisabledRabbits bool `json:"excludeDisabledRabbits,omitempty"` + + // ExcludeComputes is a list of compute nodes to exclude from the compute nodes listed in the + // SystemConfiguration + ExcludeComputes []string `json:"excludeComputes,omitempty"` + + // IncludeComputes is a list of compute nodes to use rather than getting the list of compute nodes + // from the SystemConfiguration + IncludeComputes []string `json:"includeComputes,omitempty"` + + // ComputesTarget specifies which computes to make the storage accessible to + // +kubebuilder:validation:Enum=all;even;odd;pattern + // +kubebuilder:default:=all + ComputesTarget NnfSystemStorageComputesTarget `json:"computesTarget,omitempty"` + + // ComputesPattern is a list of compute node indexes (0-15) to make the storage accessible to.
This + // is only used if ComputesTarget is "pattern" + // +kubebuilder:validation:MaxItems=16 + // +kubebuilder:validation:items:Maximum=15 + // +kubebuilder:validation:items:Minimum=0 + ComputesPattern []int `json:"computesPattern,omitempty"` + + // Capacity is the allocation size on each Rabbit + // +kubebuilder:default:=1073741824 + Capacity int64 `json:"capacity"` + + // Type is the file system type to use for the storage allocation + // +kubebuilder:validation:Enum=raw;xfs;gfs2 + // +kubebuilder:default:=raw + Type string `json:"type,omitempty"` - // Foo is an example field of NnfSystemStorage. Edit nnfsystemstorage_types.go to remove/update - Foo string `json:"foo,omitempty"` + // StorageProfile is an object reference to the storage profile to use + StorageProfile corev1.ObjectReference `json:"storageProfile"` + + // MakeClientMounts specifies whether to make ClientMount resources or just + // make the devices available to the client + // +kubebuilder:default:=false + MakeClientMounts bool `json:"makeClientMounts"` + + // ClientMountPath is an optional path for where to mount the file system on the computes + ClientMountPath string `json:"clientMountPath,omitempty"` } // NnfSystemStorageStatus defines the observed state of NnfSystemStorage type NnfSystemStorageStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file + // Ready signifies whether all work has been completed + Ready bool `json:"ready"` + + dwsv1alpha2.ResourceError `json:",inline"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status - +// +kubebuilder:storageversion // NnfSystemStorage is the Schema for the nnfsystemstorages API type NnfSystemStorage struct { metav1.TypeMeta `json:",inline"` @@ -53,8 +115,11 @@ type NnfSystemStorage struct { Status NnfSystemStorageStatus `json:"status,omitempty"` } -// +kubebuilder:object:root=true +func (a *NnfSystemStorage) GetStatus() 
updater.Status[*NnfSystemStorageStatus] { + return &a.Status +} +// +kubebuilder:object:root=true // NnfSystemStorageList contains a list of NnfSystemStorage type NnfSystemStorageList struct { metav1.TypeMeta `json:",inline"` @@ -62,6 +127,16 @@ type NnfSystemStorageList struct { Items []NnfSystemStorage `json:"items"` } +func (n *NnfSystemStorageList) GetObjectList() []client.Object { + objectList := []client.Object{} + + for i := range n.Items { + objectList = append(objectList, &n.Items[i]) + } + + return objectList +} + func init() { SchemeBuilder.Register(&NnfSystemStorage{}, &NnfSystemStorageList{}) } diff --git a/api/v1alpha3/nnfsystemstorage_webhook.go b/api/v1alpha4/nnfsystemstorage_webhook.go similarity index 98% rename from api/v1alpha3/nnfsystemstorage_webhook.go rename to api/v1alpha4/nnfsystemstorage_webhook.go index fde987ce..b42c7f96 100644 --- a/api/v1alpha3/nnfsystemstorage_webhook.go +++ b/api/v1alpha4/nnfsystemstorage_webhook.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha3 +package v1alpha4 import ( ctrl "sigs.k8s.io/controller-runtime" diff --git a/api/v1alpha3/nnfsystemstorage_webhook_test.go b/api/v1alpha4/nnfsystemstorage_webhook_test.go similarity index 98% rename from api/v1alpha3/nnfsystemstorage_webhook_test.go rename to api/v1alpha4/nnfsystemstorage_webhook_test.go index 88af59b4..f75335ff 100644 --- a/api/v1alpha3/nnfsystemstorage_webhook_test.go +++ b/api/v1alpha4/nnfsystemstorage_webhook_test.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha3 +package v1alpha4 import ( . "github.com/onsi/ginkgo/v2" diff --git a/api/v1alpha3/webhook_suite_test.go b/api/v1alpha4/webhook_suite_test.go similarity index 99% rename from api/v1alpha3/webhook_suite_test.go rename to api/v1alpha4/webhook_suite_test.go index 14edae2d..3c719c31 100644 --- a/api/v1alpha3/webhook_suite_test.go +++ b/api/v1alpha4/webhook_suite_test.go @@ -17,7 +17,7 @@ * limitations under the License. 
*/ -package v1alpha3 +package v1alpha4 import ( "context" diff --git a/api/v1alpha4/workflow_helpers.go b/api/v1alpha4/workflow_helpers.go new file mode 100644 index 00000000..cdc4635d --- /dev/null +++ b/api/v1alpha4/workflow_helpers.go @@ -0,0 +1,73 @@ +/* + * Copyright 2022-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha4 + +const ( + // DirectiveIndexLabel is a label applied to child objects of the workflow + // to show which directive they were created for. This is useful during deletion + // to filter the child objects by the directive index and only delete the + // resources for the directive being processed + DirectiveIndexLabel = "nnf.cray.hpe.com/directive_index" + + // TargetDirectiveIndexLabel is used for ClientMount resources to indicate the + // directive index of the storage they're targeting. + TargetDirectiveIndexLabel = "nnf.cray.hpe.com/target_directive_index" + + // TargetOwnerUidLabel is used for ClientMount resources to indicate the UID of the + // parent NnfStorage it's targeting + TargetOwnerUidLabel = "nnf.cray.hpe.com/target_owner_uid" + + // PinnedStorageProfileLabelName is a label applied to NnfStorage objects to show + // which pinned storage profile is being used. 
+ PinnedStorageProfileLabelName = "nnf.cray.hpe.com/pinned_storage_profile_name" + + // PinnedStorageProfileLabelNameSpace is a label applied to NnfStorage objects to show + // which pinned storage profile is being used. + PinnedStorageProfileLabelNameSpace = "nnf.cray.hpe.com/pinned_storage_profile_namespace" + + // PinnedContainerProfileLabelName is a label applied to NnfStorage objects to show + // which pinned container profile is being used. + PinnedContainerProfileLabelName = "nnf.cray.hpe.com/pinned_container_profile_name" + + // PinnedContainerProfileLabelNameSpace is a label applied to NnfStorage objects to show + // which pinned container profile is being used. + PinnedContainerProfileLabelNameSpace = "nnf.cray.hpe.com/pinned_container_profile_namespace" + + // StandaloneMGTLabel is a label applied to the PersistentStorageInstance to show that + // it is for a Lustre MGT only. The value for the label is the pool name. + StandaloneMGTLabel = "nnf.cray.hpe.com/standalone_mgt" + + // RabbitNodeSelectorLabel is a label applied to each k8s Node that is a Rabbit. + // It is used for scheduling NLCs onto the rabbits. + // (This is left in its legacy form because so many existing services are + // using it in their nodeSelector.) + RabbitNodeSelectorLabel = "cray.nnf.node" + + // TaintsAndLabelsCompletedLabel is a label applied to each k8s Node that is a Rabbit. + // It is used to indicate that the node has completed the process of applying + // the taints and labels that mark it as a rabbit. + TaintsAndLabelsCompletedLabel = "nnf.cray.hpe.com/taints_and_labels_completed" + + // RabbitNodeTaintKey is a taint key applied to each k8s Node that is a Rabbit. + // It is used for scheduling NLCs onto the rabbits. + // (This is left in its legacy form to avoid having existing clusters, + // which already have this taint, grind to a halt.) 
+ RabbitNodeTaintKey = "cray.nnf.node" +) From 04e876b8fb70352e51fceca1c20ca7bdf31a52eb Mon Sep 17 00:00:00 2001 From: Blake Devcich Date: Wed, 13 Nov 2024 15:45:20 -0600 Subject: [PATCH 03/23] CRDBUMPER-mv-webhooks Move the existing webhooks from v1alpha3 to v1alpha4. Signed-off-by: Blake Devcich --- PROJECT | 86 +++++++++---------- api/v1alpha4/nnfcontainerprofile_webhook.go | 2 +- .../nnfdatamovementprofile_webhook.go | 2 +- api/v1alpha4/nnfstorageprofile_webhook.go | 2 +- cmd/main.go | 28 +++--- config/webhook/manifests.yaml | 12 +-- 6 files changed, 66 insertions(+), 66 deletions(-) diff --git a/PROJECT b/PROJECT index 075ada57..5af03ca4 100644 --- a/PROJECT +++ b/PROJECT @@ -263,9 +263,6 @@ resources: kind: NnfAccess path: github.com/NearNodeFlash/nnf-sos/api/v1alpha3 version: v1alpha3 - webhooks: - conversion: true - webhookVersion: v1 - api: crdVersion: v1 namespaced: true @@ -274,9 +271,6 @@ resources: kind: NnfContainerProfile path: github.com/NearNodeFlash/nnf-sos/api/v1alpha3 version: v1alpha3 - webhooks: - validation: true - webhookVersion: v1 - api: crdVersion: v1 namespaced: true @@ -285,9 +279,6 @@ resources: kind: NnfDataMovement path: github.com/NearNodeFlash/nnf-sos/api/v1alpha3 version: v1alpha3 - webhooks: - conversion: true - webhookVersion: v1 - api: crdVersion: v1 namespaced: true @@ -296,9 +287,6 @@ resources: kind: NnfDataMovementManager path: github.com/NearNodeFlash/nnf-sos/api/v1alpha3 version: v1alpha3 - webhooks: - conversion: true - webhookVersion: v1 - api: crdVersion: v1 namespaced: true @@ -307,9 +295,6 @@ resources: kind: NnfDataMovementProfile path: github.com/NearNodeFlash/nnf-sos/api/v1alpha3 version: v1alpha3 - webhooks: - validation: true - webhookVersion: v1 - api: crdVersion: v1 namespaced: true @@ -318,9 +303,6 @@ resources: kind: NnfLustreMGT path: github.com/NearNodeFlash/nnf-sos/api/v1alpha3 version: v1alpha3 - webhooks: - conversion: true - webhookVersion: v1 - api: crdVersion: v1 namespaced: true @@ -329,9 +311,6 @@ 
resources: kind: NnfNode path: github.com/NearNodeFlash/nnf-sos/api/v1alpha3 version: v1alpha3 - webhooks: - conversion: true - webhookVersion: v1 - api: crdVersion: v1 namespaced: true @@ -340,9 +319,6 @@ resources: kind: NnfNodeBlockStorage path: github.com/NearNodeFlash/nnf-sos/api/v1alpha3 version: v1alpha3 - webhooks: - conversion: true - webhookVersion: v1 - api: crdVersion: v1 namespaced: true @@ -351,9 +327,6 @@ resources: kind: NnfNodeECData path: github.com/NearNodeFlash/nnf-sos/api/v1alpha3 version: v1alpha3 - webhooks: - conversion: true - webhookVersion: v1 - api: crdVersion: v1 namespaced: true @@ -362,9 +335,6 @@ resources: kind: NnfNodeStorage path: github.com/NearNodeFlash/nnf-sos/api/v1alpha3 version: v1alpha3 - webhooks: - conversion: true - webhookVersion: v1 - api: crdVersion: v1 namespaced: true @@ -373,9 +343,6 @@ resources: kind: NnfPortManager path: github.com/NearNodeFlash/nnf-sos/api/v1alpha3 version: v1alpha3 - webhooks: - conversion: true - webhookVersion: v1 - api: crdVersion: v1 namespaced: true @@ -384,9 +351,6 @@ resources: kind: NnfStorage path: github.com/NearNodeFlash/nnf-sos/api/v1alpha3 version: v1alpha3 - webhooks: - conversion: true - webhookVersion: v1 - api: crdVersion: v1 namespaced: true @@ -395,9 +359,6 @@ resources: kind: NnfStorageProfile path: github.com/NearNodeFlash/nnf-sos/api/v1alpha3 version: v1alpha3 - webhooks: - validation: true - webhookVersion: v1 - api: crdVersion: v1 namespaced: true @@ -406,9 +367,6 @@ resources: kind: NnfSystemStorage path: github.com/NearNodeFlash/nnf-sos/api/v1alpha3 version: v1alpha3 - webhooks: - conversion: true - webhookVersion: v1 - api: crdVersion: v1 namespaced: true @@ -417,6 +375,9 @@ resources: kind: NnfAccess path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 version: v1alpha4 + webhooks: + conversion: true + webhookVersion: v1 - api: crdVersion: v1 namespaced: true @@ -425,6 +386,9 @@ resources: kind: NnfContainerProfile path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 
version: v1alpha4 + webhooks: + validation: true + webhookVersion: v1 - api: crdVersion: v1 namespaced: true @@ -433,6 +397,9 @@ resources: kind: NnfDataMovement path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 version: v1alpha4 + webhooks: + conversion: true + webhookVersion: v1 - api: crdVersion: v1 namespaced: true @@ -441,6 +408,9 @@ resources: kind: NnfDataMovementManager path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 version: v1alpha4 + webhooks: + conversion: true + webhookVersion: v1 - api: crdVersion: v1 namespaced: true @@ -449,6 +419,9 @@ resources: kind: NnfDataMovementProfile path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 version: v1alpha4 + webhooks: + validation: true + webhookVersion: v1 - api: crdVersion: v1 namespaced: true @@ -457,6 +430,9 @@ resources: kind: NnfLustreMGT path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 version: v1alpha4 + webhooks: + conversion: true + webhookVersion: v1 - api: crdVersion: v1 namespaced: true @@ -465,6 +441,9 @@ resources: kind: NnfNode path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 version: v1alpha4 + webhooks: + conversion: true + webhookVersion: v1 - api: crdVersion: v1 namespaced: true @@ -473,6 +452,9 @@ resources: kind: NnfNodeBlockStorage path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 version: v1alpha4 + webhooks: + conversion: true + webhookVersion: v1 - api: crdVersion: v1 namespaced: true @@ -481,6 +463,9 @@ resources: kind: NnfNodeECData path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 version: v1alpha4 + webhooks: + conversion: true + webhookVersion: v1 - api: crdVersion: v1 namespaced: true @@ -489,6 +474,9 @@ resources: kind: NnfNodeStorage path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 version: v1alpha4 + webhooks: + conversion: true + webhookVersion: v1 - api: crdVersion: v1 namespaced: true @@ -497,6 +485,9 @@ resources: kind: NnfPortManager path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 version: v1alpha4 + webhooks: + conversion: true + webhookVersion: v1 
- api: crdVersion: v1 namespaced: true @@ -505,6 +496,9 @@ resources: kind: NnfStorage path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 version: v1alpha4 + webhooks: + conversion: true + webhookVersion: v1 - api: crdVersion: v1 namespaced: true @@ -513,6 +507,9 @@ resources: kind: NnfStorageProfile path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 version: v1alpha4 + webhooks: + validation: true + webhookVersion: v1 - api: crdVersion: v1 namespaced: true @@ -521,4 +518,7 @@ resources: kind: NnfSystemStorage path: github.com/NearNodeFlash/nnf-sos/api/v1alpha4 version: v1alpha4 -version: "3" + webhooks: + conversion: true + webhookVersion: v1 +version: '3' diff --git a/api/v1alpha4/nnfcontainerprofile_webhook.go b/api/v1alpha4/nnfcontainerprofile_webhook.go index 64ec5556..bd361929 100644 --- a/api/v1alpha4/nnfcontainerprofile_webhook.go +++ b/api/v1alpha4/nnfcontainerprofile_webhook.go @@ -45,7 +45,7 @@ func (r *NnfContainerProfile) SetupWebhookWithManager(mgr ctrl.Manager) error { // NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here. // Modifying the path for an invalid path can cause API server errors; failing to locate the webhook. 
-//+kubebuilder:webhook:path=/validate-nnf-cray-hpe-com-v1alpha3-nnfcontainerprofile,mutating=false,failurePolicy=fail,sideEffects=None,groups=nnf.cray.hpe.com,resources=nnfcontainerprofiles,verbs=create;update,versions=v1alpha3,name=vnnfcontainerprofile.kb.io,admissionReviewVersions=v1 +//+kubebuilder:webhook:path=/validate-nnf-cray-hpe-com-v1alpha4-nnfcontainerprofile,mutating=false,failurePolicy=fail,sideEffects=None,groups=nnf.cray.hpe.com,resources=nnfcontainerprofiles,verbs=create;update,versions=v1alpha4,name=vnnfcontainerprofile.kb.io,admissionReviewVersions=v1 var _ webhook.Validator = &NnfContainerProfile{} diff --git a/api/v1alpha4/nnfdatamovementprofile_webhook.go b/api/v1alpha4/nnfdatamovementprofile_webhook.go index 4b4223f7..b62c157d 100644 --- a/api/v1alpha4/nnfdatamovementprofile_webhook.go +++ b/api/v1alpha4/nnfdatamovementprofile_webhook.go @@ -43,7 +43,7 @@ func (r *NnfDataMovementProfile) SetupWebhookWithManager(mgr ctrl.Manager) error // NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here. // Modifying the path for an invalid path can cause API server errors; failing to locate the webhook. 
-// +kubebuilder:webhook:path=/validate-nnf-cray-hpe-com-v1alpha3-nnfdatamovementprofile,mutating=false,failurePolicy=fail,sideEffects=None,groups=nnf.cray.hpe.com,resources=nnfdatamovementprofiles,verbs=create;update,versions=v1alpha3,name=vnnfdatamovementprofile.kb.io,admissionReviewVersions=v1 +// +kubebuilder:webhook:path=/validate-nnf-cray-hpe-com-v1alpha4-nnfdatamovementprofile,mutating=false,failurePolicy=fail,sideEffects=None,groups=nnf.cray.hpe.com,resources=nnfdatamovementprofiles,verbs=create;update,versions=v1alpha4,name=vnnfdatamovementprofile.kb.io,admissionReviewVersions=v1 var _ webhook.Validator = &NnfDataMovementProfile{} diff --git a/api/v1alpha4/nnfstorageprofile_webhook.go b/api/v1alpha4/nnfstorageprofile_webhook.go index 8516f16a..54428669 100644 --- a/api/v1alpha4/nnfstorageprofile_webhook.go +++ b/api/v1alpha4/nnfstorageprofile_webhook.go @@ -43,7 +43,7 @@ func (r *NnfStorageProfile) SetupWebhookWithManager(mgr ctrl.Manager) error { // NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here. // Modifying the path for an invalid path can cause API server errors; failing to locate the webhook. 
-//+kubebuilder:webhook:path=/validate-nnf-cray-hpe-com-v1alpha3-nnfstorageprofile,mutating=false,failurePolicy=fail,sideEffects=None,groups=nnf.cray.hpe.com,resources=nnfstorageprofiles,verbs=create;update,versions=v1alpha3,name=vnnfstorageprofile.kb.io,admissionReviewVersions=v1 +//+kubebuilder:webhook:path=/validate-nnf-cray-hpe-com-v1alpha4-nnfstorageprofile,mutating=false,failurePolicy=fail,sideEffects=None,groups=nnf.cray.hpe.com,resources=nnfstorageprofiles,verbs=create;update,versions=v1alpha4,name=vnnfstorageprofile.kb.io,admissionReviewVersions=v1 var _ webhook.Validator = &NnfStorageProfile{} diff --git a/cmd/main.go b/cmd/main.go index eb776e1d..75f9b3c8 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -380,88 +380,88 @@ func (c *storageController) SetupReconcilers(mgr manager.Manager, opts *nnf.Opti var err error if os.Getenv("ENABLE_WEBHOOKS") != "false" { - if err = (&nnfv1alpha3.NnfStorageProfile{}).SetupWebhookWithManager(mgr); err != nil { + if err = (&nnfv1alpha4.NnfStorageProfile{}).SetupWebhookWithManager(mgr); err != nil { ctrl.Log.Error(err, "unable to create webhook", "webhook", "NnfStorageProfile") return err } } if os.Getenv("ENABLE_WEBHOOKS") != "false" { - if err = (&nnfv1alpha3.NnfContainerProfile{}).SetupWebhookWithManager(mgr); err != nil { + if err = (&nnfv1alpha4.NnfContainerProfile{}).SetupWebhookWithManager(mgr); err != nil { ctrl.Log.Error(err, "unable to create webhook", "webhook", "NnfContainerProfile") return err } } if os.Getenv("ENABLE_WEBHOOKS") != "false" { - if err = (&nnfv1alpha3.NnfDataMovementProfile{}).SetupWebhookWithManager(mgr); err != nil { + if err = (&nnfv1alpha4.NnfDataMovementProfile{}).SetupWebhookWithManager(mgr); err != nil { ctrl.Log.Error(err, "unable to create webhook", "webhook", "NnfDataMovementProfile") return err } } if os.Getenv("ENABLE_WEBHOOKS") != "false" { - if err = (&nnfv1alpha3.NnfAccess{}).SetupWebhookWithManager(mgr); err != nil { + if err = 
(&nnfv1alpha4.NnfAccess{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "NnfAccess") os.Exit(1) } } if os.Getenv("ENABLE_WEBHOOKS") != "false" { - if err = (&nnfv1alpha3.NnfDataMovement{}).SetupWebhookWithManager(mgr); err != nil { + if err = (&nnfv1alpha4.NnfDataMovement{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "NnfDataMovement") os.Exit(1) } } if os.Getenv("ENABLE_WEBHOOKS") != "false" { - if err = (&nnfv1alpha3.NnfDataMovementManager{}).SetupWebhookWithManager(mgr); err != nil { + if err = (&nnfv1alpha4.NnfDataMovementManager{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "NnfDataMovementManager") os.Exit(1) } } if os.Getenv("ENABLE_WEBHOOKS") != "false" { - if err = (&nnfv1alpha3.NnfLustreMGT{}).SetupWebhookWithManager(mgr); err != nil { + if err = (&nnfv1alpha4.NnfLustreMGT{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "NnfLustreMGT") os.Exit(1) } } if os.Getenv("ENABLE_WEBHOOKS") != "false" { - if err = (&nnfv1alpha3.NnfNode{}).SetupWebhookWithManager(mgr); err != nil { + if err = (&nnfv1alpha4.NnfNode{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "NnfNode") os.Exit(1) } } if os.Getenv("ENABLE_WEBHOOKS") != "false" { - if err = (&nnfv1alpha3.NnfNodeBlockStorage{}).SetupWebhookWithManager(mgr); err != nil { + if err = (&nnfv1alpha4.NnfNodeBlockStorage{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "NnfNodeBlockStorage") os.Exit(1) } } if os.Getenv("ENABLE_WEBHOOKS") != "false" { - if err = (&nnfv1alpha3.NnfNodeECData{}).SetupWebhookWithManager(mgr); err != nil { + if err = (&nnfv1alpha4.NnfNodeECData{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", 
"NnfNodeECData") os.Exit(1) } } if os.Getenv("ENABLE_WEBHOOKS") != "false" { - if err = (&nnfv1alpha3.NnfNodeStorage{}).SetupWebhookWithManager(mgr); err != nil { + if err = (&nnfv1alpha4.NnfNodeStorage{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "NnfNodeStorage") os.Exit(1) } } if os.Getenv("ENABLE_WEBHOOKS") != "false" { - if err = (&nnfv1alpha3.NnfPortManager{}).SetupWebhookWithManager(mgr); err != nil { + if err = (&nnfv1alpha4.NnfPortManager{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "NnfPortManager") os.Exit(1) } } if os.Getenv("ENABLE_WEBHOOKS") != "false" { - if err = (&nnfv1alpha3.NnfStorage{}).SetupWebhookWithManager(mgr); err != nil { + if err = (&nnfv1alpha4.NnfStorage{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "NnfStorage") os.Exit(1) } } if os.Getenv("ENABLE_WEBHOOKS") != "false" { - if err = (&nnfv1alpha3.NnfSystemStorage{}).SetupWebhookWithManager(mgr); err != nil { + if err = (&nnfv1alpha4.NnfSystemStorage{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "NnfSystemStorage") os.Exit(1) } diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index 3d3637c6..0374a396 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -10,14 +10,14 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-nnf-cray-hpe-com-v1alpha3-nnfcontainerprofile + path: /validate-nnf-cray-hpe-com-v1alpha4-nnfcontainerprofile failurePolicy: Fail name: vnnfcontainerprofile.kb.io rules: - apiGroups: - nnf.cray.hpe.com apiVersions: - - v1alpha3 + - v1alpha4 operations: - CREATE - UPDATE @@ -30,14 +30,14 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-nnf-cray-hpe-com-v1alpha3-nnfdatamovementprofile + path: 
/validate-nnf-cray-hpe-com-v1alpha4-nnfdatamovementprofile failurePolicy: Fail name: vnnfdatamovementprofile.kb.io rules: - apiGroups: - nnf.cray.hpe.com apiVersions: - - v1alpha3 + - v1alpha4 operations: - CREATE - UPDATE @@ -50,14 +50,14 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-nnf-cray-hpe-com-v1alpha3-nnfstorageprofile + path: /validate-nnf-cray-hpe-com-v1alpha4-nnfstorageprofile failurePolicy: Fail name: vnnfstorageprofile.kb.io rules: - apiGroups: - nnf.cray.hpe.com apiVersions: - - v1alpha3 + - v1alpha4 operations: - CREATE - UPDATE From 5b1c771dae2d0fa047589b331cc27fcbbef6f806 Mon Sep 17 00:00:00 2001 From: Blake Devcich Date: Wed, 13 Nov 2024 15:45:21 -0600 Subject: [PATCH 04/23] CRDBUMPER-conversion-webhooks Create conversion webhooks and hub routines for v1alpha4. This may have used "kubebuilder create webhook --conversion" for any API that did not already have a webhook. Any newly-created api/v1alpha4/*_webhook_test.go is empty and does not need content at this time. It has been updated with a comment to explain where conversion tests are located. ACTION: Any new tests added to github/cluster-api/util/conversion/conversion_test.go may need to be manually adjusted. Look for the "ACTION" comments in this file. This may have added a new SetupWebhookWithManager() to suite_test.go, though a later step will complete the changes to that file. Signed-off-by: Blake Devcich --- api/v1alpha4/conversion.go | 51 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 api/v1alpha4/conversion.go diff --git a/api/v1alpha4/conversion.go b/api/v1alpha4/conversion.go new file mode 100644 index 00000000..342f91c1 --- /dev/null +++ b/api/v1alpha4/conversion.go @@ -0,0 +1,51 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. 
+ * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha4 + +func (*NnfAccess) Hub() {} +func (*NnfContainerProfile) Hub() {} +func (*NnfDataMovement) Hub() {} +func (*NnfDataMovementManager) Hub() {} +func (*NnfDataMovementProfile) Hub() {} +func (*NnfLustreMGT) Hub() {} +func (*NnfNode) Hub() {} +func (*NnfNodeBlockStorage) Hub() {} +func (*NnfNodeECData) Hub() {} +func (*NnfNodeStorage) Hub() {} +func (*NnfPortManager) Hub() {} +func (*NnfStorage) Hub() {} +func (*NnfStorageProfile) Hub() {} +func (*NnfSystemStorage) Hub() {} + +// The conversion-verifier tool wants these...though they're never used. +func (*NnfAccessList) Hub() {} +func (*NnfContainerProfileList) Hub() {} +func (*NnfDataMovementList) Hub() {} +func (*NnfDataMovementManagerList) Hub() {} +func (*NnfDataMovementProfileList) Hub() {} +func (*NnfLustreMGTList) Hub() {} +func (*NnfNodeList) Hub() {} +func (*NnfNodeBlockStorageList) Hub() {} +func (*NnfNodeECDataList) Hub() {} +func (*NnfNodeStorageList) Hub() {} +func (*NnfPortManagerList) Hub() {} +func (*NnfStorageList) Hub() {} +func (*NnfStorageProfileList) Hub() {} +func (*NnfSystemStorageList) Hub() {} From a7c639d25fe7c0ab2967063480def608e0600fc6 Mon Sep 17 00:00:00 2001 From: Blake Devcich Date: Wed, 13 Nov 2024 15:45:22 -0600 Subject: [PATCH 05/23] CRDBUMPER-conversion-gen Create conversion routines and tests for v1alpha3. 
Switch api/v1alpha3/conversion.go content from hub to spoke. These conversion.go ConvertTo()/ConvertFrom() routines are complete and do not require manual adjustment at this time, because v1alpha3 is currently identical to the new hub v1alpha4. ACTION: The api/v1alpha3/conversion_test.go may need to be manually adjusted for your needs, especially if it has been manually adjusted in earlier spokes. ACTION: Any new tests added to internal/controller/conversion_test.go may need to be manually adjusted. This added api/v1alpha3/doc.go to hold the k8s:conversion-gen marker that points to the new hub. Signed-off-by: Blake Devcich --- api/v1alpha3/conversion.go | 610 +++++++++++++++++++++++-- api/v1alpha3/conversion_test.go | 107 +++++ api/v1alpha3/doc.go | 23 + internal/controller/conversion_test.go | 369 ++++++++++++--- 4 files changed, 1021 insertions(+), 88 deletions(-) create mode 100644 api/v1alpha3/conversion_test.go create mode 100644 api/v1alpha3/doc.go diff --git a/api/v1alpha3/conversion.go b/api/v1alpha3/conversion.go index ba2fc0ad..db8e7da2 100644 --- a/api/v1alpha3/conversion.go +++ b/api/v1alpha3/conversion.go @@ -19,33 +19,583 @@ package v1alpha3 -func (*NnfAccess) Hub() {} -func (*NnfContainerProfile) Hub() {} -func (*NnfDataMovement) Hub() {} -func (*NnfDataMovementManager) Hub() {} -func (*NnfDataMovementProfile) Hub() {} -func (*NnfLustreMGT) Hub() {} -func (*NnfNode) Hub() {} -func (*NnfNodeBlockStorage) Hub() {} -func (*NnfNodeECData) Hub() {} -func (*NnfNodeStorage) Hub() {} -func (*NnfPortManager) Hub() {} -func (*NnfStorage) Hub() {} -func (*NnfStorageProfile) Hub() {} -func (*NnfSystemStorage) Hub() {} - -// The conversion-verifier tool wants these...though they're never used. 
-func (*NnfAccessList) Hub() {} -func (*NnfContainerProfileList) Hub() {} -func (*NnfDataMovementList) Hub() {} -func (*NnfDataMovementManagerList) Hub() {} -func (*NnfDataMovementProfileList) Hub() {} -func (*NnfLustreMGTList) Hub() {} -func (*NnfNodeList) Hub() {} -func (*NnfNodeBlockStorageList) Hub() {} -func (*NnfNodeECDataList) Hub() {} -func (*NnfNodeStorageList) Hub() {} -func (*NnfPortManagerList) Hub() {} -func (*NnfStorageList) Hub() {} -func (*NnfStorageProfileList) Hub() {} -func (*NnfSystemStorageList) Hub() {} +import ( + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/conversion" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" + utilconversion "github.com/NearNodeFlash/nnf-sos/github/cluster-api/util/conversion" +) + +var convertlog = logf.Log.V(2).WithName("convert-v1alpha3") + +func (src *NnfAccess) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfAccess To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha4.NnfAccess) + + if err := Convert_v1alpha3_NnfAccess_To_v1alpha4_NnfAccess(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &nnfv1alpha4.NnfAccess{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. 
+ + return nil +} + +func (dst *NnfAccess) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha4.NnfAccess) + convertlog.Info("Convert NnfAccess From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha4_NnfAccess_To_v1alpha3_NnfAccess(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. + return utilconversion.MarshalData(src, dst) +} + +func (src *NnfContainerProfile) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfContainerProfile To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha4.NnfContainerProfile) + + if err := Convert_v1alpha3_NnfContainerProfile_To_v1alpha4_NnfContainerProfile(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &nnfv1alpha4.NnfContainerProfile{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. + + return nil +} + +func (dst *NnfContainerProfile) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha4.NnfContainerProfile) + convertlog.Info("Convert NnfContainerProfile From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha4_NnfContainerProfile_To_v1alpha3_NnfContainerProfile(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. 
+ return utilconversion.MarshalData(src, dst) +} + +func (src *NnfDataMovement) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfDataMovement To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha4.NnfDataMovement) + + if err := Convert_v1alpha3_NnfDataMovement_To_v1alpha4_NnfDataMovement(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &nnfv1alpha4.NnfDataMovement{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. + + return nil +} + +func (dst *NnfDataMovement) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha4.NnfDataMovement) + convertlog.Info("Convert NnfDataMovement From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha4_NnfDataMovement_To_v1alpha3_NnfDataMovement(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. + return utilconversion.MarshalData(src, dst) +} + +func (src *NnfDataMovementManager) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfDataMovementManager To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha4.NnfDataMovementManager) + + if err := Convert_v1alpha3_NnfDataMovementManager_To_v1alpha4_NnfDataMovementManager(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &nnfv1alpha4.NnfDataMovementManager{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. 
+ // Otherwise, you may comment out UnmarshalData() until it's needed. + + return nil +} + +func (dst *NnfDataMovementManager) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha4.NnfDataMovementManager) + convertlog.Info("Convert NnfDataMovementManager From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha4_NnfDataMovementManager_To_v1alpha3_NnfDataMovementManager(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. + return utilconversion.MarshalData(src, dst) +} + +func (src *NnfDataMovementProfile) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfDataMovementProfile To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha4.NnfDataMovementProfile) + + if err := Convert_v1alpha3_NnfDataMovementProfile_To_v1alpha4_NnfDataMovementProfile(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &nnfv1alpha4.NnfDataMovementProfile{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. + + return nil +} + +func (dst *NnfDataMovementProfile) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha4.NnfDataMovementProfile) + convertlog.Info("Convert NnfDataMovementProfile From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha4_NnfDataMovementProfile_To_v1alpha3_NnfDataMovementProfile(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. 
+ return utilconversion.MarshalData(src, dst) +} + +func (src *NnfLustreMGT) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfLustreMGT To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha4.NnfLustreMGT) + + if err := Convert_v1alpha3_NnfLustreMGT_To_v1alpha4_NnfLustreMGT(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &nnfv1alpha4.NnfLustreMGT{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. + + return nil +} + +func (dst *NnfLustreMGT) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha4.NnfLustreMGT) + convertlog.Info("Convert NnfLustreMGT From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha4_NnfLustreMGT_To_v1alpha3_NnfLustreMGT(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. + return utilconversion.MarshalData(src, dst) +} + +func (src *NnfNode) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfNode To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha4.NnfNode) + + if err := Convert_v1alpha3_NnfNode_To_v1alpha4_NnfNode(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &nnfv1alpha4.NnfNode{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. 
+ + return nil +} + +func (dst *NnfNode) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha4.NnfNode) + convertlog.Info("Convert NnfNode From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha4_NnfNode_To_v1alpha3_NnfNode(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. + return utilconversion.MarshalData(src, dst) +} + +func (src *NnfNodeBlockStorage) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfNodeBlockStorage To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha4.NnfNodeBlockStorage) + + if err := Convert_v1alpha3_NnfNodeBlockStorage_To_v1alpha4_NnfNodeBlockStorage(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &nnfv1alpha4.NnfNodeBlockStorage{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. + + return nil +} + +func (dst *NnfNodeBlockStorage) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha4.NnfNodeBlockStorage) + convertlog.Info("Convert NnfNodeBlockStorage From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha4_NnfNodeBlockStorage_To_v1alpha3_NnfNodeBlockStorage(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. 
+ return utilconversion.MarshalData(src, dst) +} + +func (src *NnfNodeECData) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfNodeECData To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha4.NnfNodeECData) + + if err := Convert_v1alpha3_NnfNodeECData_To_v1alpha4_NnfNodeECData(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &nnfv1alpha4.NnfNodeECData{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. + + return nil +} + +func (dst *NnfNodeECData) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha4.NnfNodeECData) + convertlog.Info("Convert NnfNodeECData From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha4_NnfNodeECData_To_v1alpha3_NnfNodeECData(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. + return utilconversion.MarshalData(src, dst) +} + +func (src *NnfNodeStorage) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfNodeStorage To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha4.NnfNodeStorage) + + if err := Convert_v1alpha3_NnfNodeStorage_To_v1alpha4_NnfNodeStorage(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &nnfv1alpha4.NnfNodeStorage{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. 
+ + return nil +} + +func (dst *NnfNodeStorage) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha4.NnfNodeStorage) + convertlog.Info("Convert NnfNodeStorage From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha4_NnfNodeStorage_To_v1alpha3_NnfNodeStorage(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. + return utilconversion.MarshalData(src, dst) +} + +func (src *NnfPortManager) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfPortManager To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha4.NnfPortManager) + + if err := Convert_v1alpha3_NnfPortManager_To_v1alpha4_NnfPortManager(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &nnfv1alpha4.NnfPortManager{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. + + return nil +} + +func (dst *NnfPortManager) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha4.NnfPortManager) + convertlog.Info("Convert NnfPortManager From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha4_NnfPortManager_To_v1alpha3_NnfPortManager(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. 
+ return utilconversion.MarshalData(src, dst) +} + +func (src *NnfStorage) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfStorage To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha4.NnfStorage) + + if err := Convert_v1alpha3_NnfStorage_To_v1alpha4_NnfStorage(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &nnfv1alpha4.NnfStorage{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. + + return nil +} + +func (dst *NnfStorage) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha4.NnfStorage) + convertlog.Info("Convert NnfStorage From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha4_NnfStorage_To_v1alpha3_NnfStorage(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. + return utilconversion.MarshalData(src, dst) +} + +func (src *NnfStorageProfile) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfStorageProfile To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha4.NnfStorageProfile) + + if err := Convert_v1alpha3_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &nnfv1alpha4.NnfStorageProfile{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. 
+ + return nil +} + +func (dst *NnfStorageProfile) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha4.NnfStorageProfile) + convertlog.Info("Convert NnfStorageProfile From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha4_NnfStorageProfile_To_v1alpha3_NnfStorageProfile(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. + return utilconversion.MarshalData(src, dst) +} + +func (src *NnfSystemStorage) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert NnfSystemStorage To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*nnfv1alpha4.NnfSystemStorage) + + if err := Convert_v1alpha3_NnfSystemStorage_To_v1alpha4_NnfSystemStorage(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &nnfv1alpha4.NnfSystemStorage{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. + + return nil +} + +func (dst *NnfSystemStorage) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*nnfv1alpha4.NnfSystemStorage) + convertlog.Info("Convert NnfSystemStorage From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha4_NnfSystemStorage_To_v1alpha3_NnfSystemStorage(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. + return utilconversion.MarshalData(src, dst) +} + +// The List-based ConvertTo/ConvertFrom routines are never used by the +// conversion webhook, but the conversion-verifier tool wants to see them. +// The conversion-gen tool generated the Convert_X_to_Y routines, should they +// ever be needed. 
+ +func resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: "nnf", Resource: resource} +} + +func (src *NnfAccessList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfAccessList"), "ConvertTo") +} + +func (dst *NnfAccessList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfAccessList"), "ConvertFrom") +} + +func (src *NnfContainerProfileList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfContainerProfileList"), "ConvertTo") +} + +func (dst *NnfContainerProfileList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfContainerProfileList"), "ConvertFrom") +} + +func (src *NnfDataMovementList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfDataMovementList"), "ConvertTo") +} + +func (dst *NnfDataMovementList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfDataMovementList"), "ConvertFrom") +} + +func (src *NnfDataMovementManagerList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfDataMovementManagerList"), "ConvertTo") +} + +func (dst *NnfDataMovementManagerList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfDataMovementManagerList"), "ConvertFrom") +} + +func (src *NnfDataMovementProfileList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfDataMovementProfileList"), "ConvertTo") +} + +func (dst *NnfDataMovementProfileList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfDataMovementProfileList"), "ConvertFrom") +} + +func (src *NnfLustreMGTList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfLustreMGTList"), 
"ConvertTo") +} + +func (dst *NnfLustreMGTList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfLustreMGTList"), "ConvertFrom") +} + +func (src *NnfNodeList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfNodeList"), "ConvertTo") +} + +func (dst *NnfNodeList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfNodeList"), "ConvertFrom") +} + +func (src *NnfNodeBlockStorageList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfNodeBlockStorageList"), "ConvertTo") +} + +func (dst *NnfNodeBlockStorageList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfNodeBlockStorageList"), "ConvertFrom") +} + +func (src *NnfNodeECDataList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfNodeECDataList"), "ConvertTo") +} + +func (dst *NnfNodeECDataList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfNodeECDataList"), "ConvertFrom") +} + +func (src *NnfNodeStorageList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfNodeStorageList"), "ConvertTo") +} + +func (dst *NnfNodeStorageList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfNodeStorageList"), "ConvertFrom") +} + +func (src *NnfPortManagerList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfPortManagerList"), "ConvertTo") +} + +func (dst *NnfPortManagerList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfPortManagerList"), "ConvertFrom") +} + +func (src *NnfStorageList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfStorageList"), "ConvertTo") +} + +func (dst 
*NnfStorageList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfStorageList"), "ConvertFrom") +} + +func (src *NnfStorageProfileList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfStorageProfileList"), "ConvertTo") +} + +func (dst *NnfStorageProfileList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfStorageProfileList"), "ConvertFrom") +} + +func (src *NnfSystemStorageList) ConvertTo(dstRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfSystemStorageList"), "ConvertTo") +} + +func (dst *NnfSystemStorageList) ConvertFrom(srcRaw conversion.Hub) error { + return apierrors.NewMethodNotSupported(resource("NnfSystemStorageList"), "ConvertFrom") +} diff --git a/api/v1alpha3/conversion_test.go b/api/v1alpha3/conversion_test.go new file mode 100644 index 00000000..c68ef9f5 --- /dev/null +++ b/api/v1alpha3/conversion_test.go @@ -0,0 +1,107 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha3 + +import ( + "testing" + + . 
"github.com/onsi/ginkgo/v2" + + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" + utilconversion "github.com/NearNodeFlash/nnf-sos/github/cluster-api/util/conversion" +) + +func TestFuzzyConversion(t *testing.T) { + + t.Run("for NnfAccess", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha4.NnfAccess{}, + Spoke: &NnfAccess{}, + })) + + t.Run("for NnfContainerProfile", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha4.NnfContainerProfile{}, + Spoke: &NnfContainerProfile{}, + })) + + t.Run("for NnfDataMovement", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha4.NnfDataMovement{}, + Spoke: &NnfDataMovement{}, + })) + + t.Run("for NnfDataMovementManager", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha4.NnfDataMovementManager{}, + Spoke: &NnfDataMovementManager{}, + })) + + t.Run("for NnfDataMovementProfile", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha4.NnfDataMovementProfile{}, + Spoke: &NnfDataMovementProfile{}, + })) + + t.Run("for NnfLustreMGT", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha4.NnfLustreMGT{}, + Spoke: &NnfLustreMGT{}, + })) + + t.Run("for NnfNode", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha4.NnfNode{}, + Spoke: &NnfNode{}, + })) + + t.Run("for NnfNodeBlockStorage", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha4.NnfNodeBlockStorage{}, + Spoke: &NnfNodeBlockStorage{}, + })) + + t.Run("for NnfNodeECData", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha4.NnfNodeECData{}, + Spoke: &NnfNodeECData{}, + })) + + t.Run("for NnfNodeStorage", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha4.NnfNodeStorage{}, + Spoke: &NnfNodeStorage{}, + })) + + t.Run("for NnfPortManager", 
utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha4.NnfPortManager{}, + Spoke: &NnfPortManager{}, + })) + + t.Run("for NnfStorage", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha4.NnfStorage{}, + Spoke: &NnfStorage{}, + })) + + t.Run("for NnfStorageProfile", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha4.NnfStorageProfile{}, + Spoke: &NnfStorageProfile{}, + })) + + t.Run("for NnfSystemStorage", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ + Hub: &nnfv1alpha4.NnfSystemStorage{}, + Spoke: &NnfSystemStorage{}, + })) + +} + +// Just touch ginkgo, so it's here to interpret any ginkgo args from +// "make test", so that doesn't fail on this test file. +var _ = BeforeSuite(func() {}) diff --git a/api/v1alpha3/doc.go b/api/v1alpha3/doc.go new file mode 100644 index 00000000..6de88b87 --- /dev/null +++ b/api/v1alpha3/doc.go @@ -0,0 +1,23 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// The following tag tells conversion-gen to generate conversion routines, and +// it tells conversion-gen the name of the hub version. 
+// +k8s:conversion-gen=github.com/NearNodeFlash/nnf-sos/api/v1alpha4 +package v1alpha3 diff --git a/internal/controller/conversion_test.go b/internal/controller/conversion_test.go index 3361ac2e..2fd1796d 100644 --- a/internal/controller/conversion_test.go +++ b/internal/controller/conversion_test.go @@ -33,6 +33,7 @@ import ( nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" utilconversion "github.com/NearNodeFlash/nnf-sos/github/cluster-api/util/conversion" ) @@ -47,16 +48,16 @@ var _ = Describe("Conversion Webhook Test", func() { // have that annotation when it is accessed by its hub API. Context("NnfAccess", func() { - var resHub *nnfv1alpha3.NnfAccess + var resHub *nnfv1alpha4.NnfAccess BeforeEach(func() { id := uuid.NewString()[0:8] - resHub = &nnfv1alpha3.NnfAccess{ + resHub = &nnfv1alpha4.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: id, Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha3.NnfAccessSpec{ + Spec: nnfv1alpha4.NnfAccessSpec{ DesiredState: "mounted", TeardownState: "Teardown", Target: "all", @@ -71,7 +72,7 @@ var _ = Describe("Conversion Webhook Test", func() { AfterEach(func() { if resHub != nil { Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) - expected := &nnfv1alpha3.NnfAccess{} + expected := &nnfv1alpha4.NnfAccess{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) }).ShouldNot(Succeed()) @@ -102,20 +103,38 @@ var _ = Describe("Conversion Webhook Test", func() { }).Should(Succeed()) }) + It("reads NnfAccess resource via hub and via spoke v1alpha3", func() { + // Spoke should have annotation. 
+ resSpoke := &nnfv1alpha3.NnfAccess{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + // +crdbumper:scaffold:spoketest="nnf.NnfAccess" }) Context("NnfContainerProfile", func() { - var resHub *nnfv1alpha3.NnfContainerProfile + var resHub *nnfv1alpha4.NnfContainerProfile BeforeEach(func() { id := uuid.NewString()[0:8] - resHub = &nnfv1alpha3.NnfContainerProfile{ + resHub = &nnfv1alpha4.NnfContainerProfile{ ObjectMeta: metav1.ObjectMeta{ Name: id, Namespace: corev1.NamespaceDefault, }, - Data: nnfv1alpha3.NnfContainerProfileData{ + Data: nnfv1alpha4.NnfContainerProfileData{ Spec: &corev1.PodSpec{ NodeName: "rabbit-1", Containers: []corev1.Container{{Name: "one"}}, @@ -129,7 +148,7 @@ var _ = Describe("Conversion Webhook Test", func() { AfterEach(func() { if resHub != nil { Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) - expected := &nnfv1alpha3.NnfContainerProfile{} + expected := &nnfv1alpha4.NnfContainerProfile{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) }).ShouldNot(Succeed()) @@ -160,20 +179,38 @@ var _ = Describe("Conversion Webhook Test", func() { }).Should(Succeed()) }) + It("reads NnfContainerProfile resource via hub and via spoke v1alpha3", func() { + // Spoke should have annotation. 
+ resSpoke := &nnfv1alpha3.NnfContainerProfile{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + // +crdbumper:scaffold:spoketest="nnf.NnfContainerProfile" }) Context("NnfDataMovement", func() { - var resHub *nnfv1alpha3.NnfDataMovement + var resHub *nnfv1alpha4.NnfDataMovement BeforeEach(func() { id := uuid.NewString()[0:8] - resHub = &nnfv1alpha3.NnfDataMovement{ + resHub = &nnfv1alpha4.NnfDataMovement{ ObjectMeta: metav1.ObjectMeta{ Name: id, Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha3.NnfDataMovementSpec{}, + Spec: nnfv1alpha4.NnfDataMovementSpec{}, } Expect(k8sClient.Create(context.TODO(), resHub)).To(Succeed()) @@ -182,7 +219,7 @@ var _ = Describe("Conversion Webhook Test", func() { AfterEach(func() { if resHub != nil { Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) - expected := &nnfv1alpha3.NnfDataMovement{} + expected := &nnfv1alpha4.NnfDataMovement{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) }).ShouldNot(Succeed()) @@ -213,20 +250,38 @@ var _ = Describe("Conversion Webhook Test", func() { }).Should(Succeed()) }) + It("reads NnfDataMovement resource via hub and via spoke v1alpha3", func() { + // Spoke should have annotation. 
+ resSpoke := &nnfv1alpha3.NnfDataMovement{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + // +crdbumper:scaffold:spoketest="nnf.NnfDataMovement" }) Context("NnfDataMovementManager", func() { - var resHub *nnfv1alpha3.NnfDataMovementManager + var resHub *nnfv1alpha4.NnfDataMovementManager BeforeEach(func() { id := uuid.NewString()[0:8] - resHub = &nnfv1alpha3.NnfDataMovementManager{ + resHub = &nnfv1alpha4.NnfDataMovementManager{ ObjectMeta: metav1.ObjectMeta{ Name: id, Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha3.NnfDataMovementManagerSpec{ + Spec: nnfv1alpha4.NnfDataMovementManagerSpec{ Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ Containers: []corev1.Container{{ @@ -244,7 +299,7 @@ var _ = Describe("Conversion Webhook Test", func() { AfterEach(func() { if resHub != nil { Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) - expected := &nnfv1alpha3.NnfDataMovementManager{} + expected := &nnfv1alpha4.NnfDataMovementManager{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) }).ShouldNot(Succeed()) @@ -275,20 +330,38 @@ var _ = Describe("Conversion Webhook Test", func() { }).Should(Succeed()) }) + It("reads NnfDataMovementManager resource via hub and via spoke v1alpha3", func() { + // Spoke should have annotation. 
+ resSpoke := &nnfv1alpha3.NnfDataMovementManager{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + // +crdbumper:scaffold:spoketest="nnf.NnfDataMovementManager" }) Context("NnfDataMovementProfile", func() { - var resHub *nnfv1alpha3.NnfDataMovementProfile + var resHub *nnfv1alpha4.NnfDataMovementProfile BeforeEach(func() { id := uuid.NewString()[0:8] - resHub = &nnfv1alpha3.NnfDataMovementProfile{ + resHub = &nnfv1alpha4.NnfDataMovementProfile{ ObjectMeta: metav1.ObjectMeta{ Name: id, Namespace: corev1.NamespaceDefault, }, - Data: nnfv1alpha3.NnfDataMovementProfileData{}, + Data: nnfv1alpha4.NnfDataMovementProfileData{}, } Expect(k8sClient.Create(context.TODO(), resHub)).To(Succeed()) @@ -297,7 +370,7 @@ var _ = Describe("Conversion Webhook Test", func() { AfterEach(func() { if resHub != nil { Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) - expected := &nnfv1alpha3.NnfDataMovementProfile{} + expected := &nnfv1alpha4.NnfDataMovementProfile{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) }).ShouldNot(Succeed()) @@ -328,20 +401,38 @@ var _ = Describe("Conversion Webhook Test", func() { }).Should(Succeed()) }) + It("reads NnfDataMovementProfile resource via hub and via spoke v1alpha3", func() { + // Spoke should have annotation. 
+ resSpoke := &nnfv1alpha3.NnfDataMovementProfile{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + // +crdbumper:scaffold:spoketest="nnf.NnfDataMovementProfile" }) Context("NnfLustreMGT", func() { - var resHub *nnfv1alpha3.NnfLustreMGT + var resHub *nnfv1alpha4.NnfLustreMGT BeforeEach(func() { id := uuid.NewString()[0:8] - resHub = &nnfv1alpha3.NnfLustreMGT{ + resHub = &nnfv1alpha4.NnfLustreMGT{ ObjectMeta: metav1.ObjectMeta{ Name: id, Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha3.NnfLustreMGTSpec{ + Spec: nnfv1alpha4.NnfLustreMGTSpec{ Addresses: []string{"rabbit-1@tcp", "rabbit-2@tcp"}, }, } @@ -352,7 +443,7 @@ var _ = Describe("Conversion Webhook Test", func() { AfterEach(func() { if resHub != nil { Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) - expected := &nnfv1alpha3.NnfLustreMGT{} + expected := &nnfv1alpha4.NnfLustreMGT{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) }).ShouldNot(Succeed()) @@ -383,20 +474,38 @@ var _ = Describe("Conversion Webhook Test", func() { }).Should(Succeed()) }) + It("reads NnfLustreMGT resource via hub and via spoke v1alpha3", func() { + // Spoke should have annotation. 
+ resSpoke := &nnfv1alpha3.NnfLustreMGT{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + // +crdbumper:scaffold:spoketest="nnf.NnfLustreMGT" }) Context("NnfNode", func() { - var resHub *nnfv1alpha3.NnfNode + var resHub *nnfv1alpha4.NnfNode BeforeEach(func() { id := uuid.NewString()[0:8] - resHub = &nnfv1alpha3.NnfNode{ + resHub = &nnfv1alpha4.NnfNode{ ObjectMeta: metav1.ObjectMeta{ Name: id, Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha3.NnfNodeSpec{ + Spec: nnfv1alpha4.NnfNodeSpec{ State: "Enable", }, } @@ -407,7 +516,7 @@ var _ = Describe("Conversion Webhook Test", func() { AfterEach(func() { if resHub != nil { Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) - expected := &nnfv1alpha3.NnfNode{} + expected := &nnfv1alpha4.NnfNode{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) }).ShouldNot(Succeed()) @@ -438,20 +547,38 @@ var _ = Describe("Conversion Webhook Test", func() { }).Should(Succeed()) }) + It("reads NnfNode resource via hub and via spoke v1alpha3", func() { + // Spoke should have annotation. 
+ resSpoke := &nnfv1alpha3.NnfNode{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + // +crdbumper:scaffold:spoketest="nnf.NnfNode" }) Context("NnfNodeBlockStorage", func() { - var resHub *nnfv1alpha3.NnfNodeBlockStorage + var resHub *nnfv1alpha4.NnfNodeBlockStorage BeforeEach(func() { id := uuid.NewString()[0:8] - resHub = &nnfv1alpha3.NnfNodeBlockStorage{ + resHub = &nnfv1alpha4.NnfNodeBlockStorage{ ObjectMeta: metav1.ObjectMeta{ Name: id, Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha3.NnfNodeBlockStorageSpec{}, + Spec: nnfv1alpha4.NnfNodeBlockStorageSpec{}, } Expect(k8sClient.Create(context.TODO(), resHub)).To(Succeed()) @@ -460,7 +587,7 @@ var _ = Describe("Conversion Webhook Test", func() { AfterEach(func() { if resHub != nil { Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) - expected := &nnfv1alpha3.NnfNodeBlockStorage{} + expected := &nnfv1alpha4.NnfNodeBlockStorage{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) }).ShouldNot(Succeed()) @@ -491,20 +618,38 @@ var _ = Describe("Conversion Webhook Test", func() { }).Should(Succeed()) }) + It("reads NnfNodeBlockStorage resource via hub and via spoke v1alpha3", func() { + // Spoke should have annotation. 
+ resSpoke := &nnfv1alpha3.NnfNodeBlockStorage{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + // +crdbumper:scaffold:spoketest="nnf.NnfNodeBlockStorage" }) Context("NnfNodeECData", func() { - var resHub *nnfv1alpha3.NnfNodeECData + var resHub *nnfv1alpha4.NnfNodeECData BeforeEach(func() { id := uuid.NewString()[0:8] - resHub = &nnfv1alpha3.NnfNodeECData{ + resHub = &nnfv1alpha4.NnfNodeECData{ ObjectMeta: metav1.ObjectMeta{ Name: id, Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha3.NnfNodeECDataSpec{}, + Spec: nnfv1alpha4.NnfNodeECDataSpec{}, } Expect(k8sClient.Create(context.TODO(), resHub)).To(Succeed()) @@ -513,7 +658,7 @@ var _ = Describe("Conversion Webhook Test", func() { AfterEach(func() { if resHub != nil { Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) - expected := &nnfv1alpha3.NnfNodeECData{} + expected := &nnfv1alpha4.NnfNodeECData{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) }).ShouldNot(Succeed()) @@ -544,20 +689,38 @@ var _ = Describe("Conversion Webhook Test", func() { }).Should(Succeed()) }) + It("reads NnfNodeECData resource via hub and via spoke v1alpha3", func() { + // Spoke should have annotation. 
+ resSpoke := &nnfv1alpha3.NnfNodeECData{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + // +crdbumper:scaffold:spoketest="nnf.NnfNodeECData" }) Context("NnfNodeStorage", func() { - var resHub *nnfv1alpha3.NnfNodeStorage + var resHub *nnfv1alpha4.NnfNodeStorage BeforeEach(func() { id := uuid.NewString()[0:8] - resHub = &nnfv1alpha3.NnfNodeStorage{ + resHub = &nnfv1alpha4.NnfNodeStorage{ ObjectMeta: metav1.ObjectMeta{ Name: id, Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha3.NnfNodeStorageSpec{}, + Spec: nnfv1alpha4.NnfNodeStorageSpec{}, } Expect(k8sClient.Create(context.TODO(), resHub)).To(Succeed()) @@ -566,7 +729,7 @@ var _ = Describe("Conversion Webhook Test", func() { AfterEach(func() { if resHub != nil { Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) - expected := &nnfv1alpha3.NnfNodeStorage{} + expected := &nnfv1alpha4.NnfNodeStorage{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) }).ShouldNot(Succeed()) @@ -597,21 +760,39 @@ var _ = Describe("Conversion Webhook Test", func() { }).Should(Succeed()) }) + It("reads NnfNodeStorage resource via hub and via spoke v1alpha3", func() { + // Spoke should have annotation. 
+ resSpoke := &nnfv1alpha3.NnfNodeStorage{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + // +crdbumper:scaffold:spoketest="nnf.NnfNodeStorage" }) Context("NnfPortManager", func() { - var resHub *nnfv1alpha3.NnfPortManager + var resHub *nnfv1alpha4.NnfPortManager BeforeEach(func() { id := uuid.NewString()[0:8] - resHub = &nnfv1alpha3.NnfPortManager{ + resHub = &nnfv1alpha4.NnfPortManager{ ObjectMeta: metav1.ObjectMeta{ Name: id, Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha3.NnfPortManagerSpec{ - Allocations: make([]nnfv1alpha3.NnfPortManagerAllocationSpec, 0), + Spec: nnfv1alpha4.NnfPortManagerSpec{ + Allocations: make([]nnfv1alpha4.NnfPortManagerAllocationSpec, 0), }, } @@ -621,7 +802,7 @@ var _ = Describe("Conversion Webhook Test", func() { AfterEach(func() { if resHub != nil { Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) - expected := &nnfv1alpha3.NnfPortManager{} + expected := &nnfv1alpha4.NnfPortManager{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) }).ShouldNot(Succeed()) @@ -652,21 +833,39 @@ var _ = Describe("Conversion Webhook Test", func() { }).Should(Succeed()) }) + It("reads NnfPortManager resource via hub and via spoke v1alpha3", func() { + // Spoke should have annotation. 
+ resSpoke := &nnfv1alpha3.NnfPortManager{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + // +crdbumper:scaffold:spoketest="nnf.NnfPortManager" }) Context("NnfStorage", func() { - var resHub *nnfv1alpha3.NnfStorage + var resHub *nnfv1alpha4.NnfStorage BeforeEach(func() { id := uuid.NewString()[0:8] - resHub = &nnfv1alpha3.NnfStorage{ + resHub = &nnfv1alpha4.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: id, Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha3.NnfStorageSpec{ - AllocationSets: []nnfv1alpha3.NnfStorageAllocationSetSpec{}, + Spec: nnfv1alpha4.NnfStorageSpec{ + AllocationSets: []nnfv1alpha4.NnfStorageAllocationSetSpec{}, }, } @@ -676,7 +875,7 @@ var _ = Describe("Conversion Webhook Test", func() { AfterEach(func() { if resHub != nil { Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) - expected := &nnfv1alpha3.NnfStorage{} + expected := &nnfv1alpha4.NnfStorage{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) }).ShouldNot(Succeed()) @@ -707,20 +906,38 @@ var _ = Describe("Conversion Webhook Test", func() { }).Should(Succeed()) }) + It("reads NnfStorage resource via hub and via spoke v1alpha3", func() { + // Spoke should have annotation. 
+ resSpoke := &nnfv1alpha3.NnfStorage{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + // +crdbumper:scaffold:spoketest="nnf.NnfStorage" }) Context("NnfStorageProfile", func() { - var resHub *nnfv1alpha3.NnfStorageProfile + var resHub *nnfv1alpha4.NnfStorageProfile BeforeEach(func() { id := uuid.NewString()[0:8] - resHub = &nnfv1alpha3.NnfStorageProfile{ + resHub = &nnfv1alpha4.NnfStorageProfile{ ObjectMeta: metav1.ObjectMeta{ Name: id, Namespace: corev1.NamespaceDefault, }, - Data: nnfv1alpha3.NnfStorageProfileData{}, + Data: nnfv1alpha4.NnfStorageProfileData{}, } Expect(k8sClient.Create(context.TODO(), resHub)).To(Succeed()) @@ -729,7 +946,7 @@ var _ = Describe("Conversion Webhook Test", func() { AfterEach(func() { if resHub != nil { Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) - expected := &nnfv1alpha3.NnfStorageProfile{} + expected := &nnfv1alpha4.NnfStorageProfile{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) }).ShouldNot(Succeed()) @@ -760,20 +977,38 @@ var _ = Describe("Conversion Webhook Test", func() { }).Should(Succeed()) }) + It("reads NnfStorageProfile resource via hub and via spoke v1alpha3", func() { + // Spoke should have annotation. 
+ resSpoke := &nnfv1alpha3.NnfStorageProfile{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + // +crdbumper:scaffold:spoketest="nnf.NnfStorageProfile" }) Context("NnfSystemStorage", func() { - var resHub *nnfv1alpha3.NnfSystemStorage + var resHub *nnfv1alpha4.NnfSystemStorage BeforeEach(func() { id := uuid.NewString()[0:8] - resHub = &nnfv1alpha3.NnfSystemStorage{ + resHub = &nnfv1alpha4.NnfSystemStorage{ ObjectMeta: metav1.ObjectMeta{ Name: id, Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha3.NnfSystemStorageSpec{}, + Spec: nnfv1alpha4.NnfSystemStorageSpec{}, } Expect(k8sClient.Create(context.TODO(), resHub)).To(Succeed()) @@ -782,7 +1017,7 @@ var _ = Describe("Conversion Webhook Test", func() { AfterEach(func() { if resHub != nil { Expect(k8sClient.Delete(context.TODO(), resHub)).To(Succeed()) - expected := &nnfv1alpha3.NnfSystemStorage{} + expected := &nnfv1alpha4.NnfSystemStorage{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present. return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), expected) }).ShouldNot(Succeed()) @@ -813,6 +1048,24 @@ var _ = Describe("Conversion Webhook Test", func() { }).Should(Succeed()) }) + It("reads NnfSystemStorage resource via hub and via spoke v1alpha3", func() { + // Spoke should have annotation. 
+ resSpoke := &nnfv1alpha3.NnfSystemStorage{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).To(Succeed()) + anno := resSpoke.GetAnnotations() + g.Expect(anno).To(HaveLen(1)) + g.Expect(anno).Should(HaveKey(utilconversion.DataAnnotation)) + }).Should(Succeed()) + + // Hub should not have annotation. + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resHub)).To(Succeed()) + anno := resHub.GetAnnotations() + g.Expect(anno).To(HaveLen(0)) + }).Should(Succeed()) + }) + // +crdbumper:scaffold:spoketest="nnf.NnfSystemStorage" }) From 4b6d4082e8f2b689c347fc14d7fcbe876cf242f3 Mon Sep 17 00:00:00 2001 From: Blake Devcich Date: Wed, 13 Nov 2024 15:45:23 -0600 Subject: [PATCH 06/23] CRDBUMPER-bump-controllers Point controllers at new hub v1alpha4 Point conversion fuzz test at new hub. These routines are still valid for the new hub because it is currently identical to the previous hub. ACTION: Some controllers may have been referencing one of these non-local APIs. 
Verify that these APIs are being referenced by their correct versions: DirectiveBreakdown, Workflow Signed-off-by: Blake Devcich --- config/examples/nnf_nnfcontainerprofiles.yaml | 14 +- .../examples/nnf_nnfdatamovementprofile.yaml | 2 +- config/examples/nnf_nnfstorageprofile.yaml | 2 +- config/ports/port_manager.yaml | 2 +- .../util/conversion/conversion_test.go | 238 +++++++++--------- .../directivebreakdown_controller.go | 6 +- .../directivebreakdown_controller_test.go | 8 +- internal/controller/dws_servers_controller.go | 12 +- internal/controller/dws_storage_controller.go | 8 +- internal/controller/filesystem_helpers.go | 44 ++-- internal/controller/integration_test.go | 98 ++++---- internal/controller/nnf_access_controller.go | 86 +++---- .../controller/nnf_access_controller_test.go | 58 ++--- .../controller/nnf_clientmount_controller.go | 16 +- .../controller/nnf_lustre_mgt_controller.go | 20 +- .../nnf_lustre_mgt_controller_test.go | 14 +- .../nnf_node_block_storage_controller.go | 36 +-- internal/controller/nnf_node_controller.go | 54 ++-- .../controller/nnf_node_ec_data_controller.go | 18 +- .../controller/nnf_node_storage_controller.go | 14 +- .../nnf_node_storage_controller_test.go | 18 +- ...nf_persistentstorageinstance_controller.go | 16 +- ...rsistentstorageinstance_controller_test.go | 8 +- .../controller/nnf_port_manager_controller.go | 58 ++--- .../nnf_port_manager_controller_test.go | 52 ++-- internal/controller/nnf_storage_controller.go | 100 ++++---- .../nnf_systemconfiguration_controller.go | 14 +- ...nnf_systemconfiguration_controller_test.go | 16 +- .../controller/nnf_workflow_controller.go | 102 ++++---- ...f_workflow_controller_container_helpers.go | 18 +- .../nnf_workflow_controller_helpers.go | 84 +++---- .../nnf_workflow_controller_helpers_test.go | 8 +- .../nnf_workflow_controller_test.go | 94 +++---- .../controller/nnfcontainerprofile_helpers.go | 14 +- .../controller/nnfcontainerprofile_test.go | 16 +- 
.../nnfdatamovementprofile_helpers.go | 14 +- .../controller/nnfdatamovementprofile_test.go | 14 +- .../controller/nnfstorageprofile_helpers.go | 30 +-- internal/controller/nnfstorageprofile_test.go | 14 +- .../controller/nnfsystemstorage_controller.go | 64 ++--- .../nnfsystemstorage_controller_test.go | 50 ++-- internal/controller/suite_test.go | 32 +-- mount-daemon/main.go | 4 +- 43 files changed, 797 insertions(+), 793 deletions(-) diff --git a/config/examples/nnf_nnfcontainerprofiles.yaml b/config/examples/nnf_nnfcontainerprofiles.yaml index 010f2af7..70497e09 100644 --- a/config/examples/nnf_nnfcontainerprofiles.yaml +++ b/config/examples/nnf_nnfcontainerprofiles.yaml @@ -1,4 +1,4 @@ -apiVersion: nnf.cray.hpe.com/v1alpha3 +apiVersion: nnf.cray.hpe.com/v1alpha4 kind: NnfContainerProfile metadata: name: example-success @@ -20,7 +20,7 @@ data: - -c - "sleep 10 && exit 0" --- -apiVersion: nnf.cray.hpe.com/v1alpha3 +apiVersion: nnf.cray.hpe.com/v1alpha4 kind: NnfContainerProfile metadata: name: example-fail @@ -34,7 +34,7 @@ data: - -c - "sleep 10 && exit 1" --- -apiVersion: nnf.cray.hpe.com/v1alpha3 +apiVersion: nnf.cray.hpe.com/v1alpha4 kind: NnfContainerProfile metadata: name: example-randomly-fail @@ -59,7 +59,7 @@ data: echo "exiting: $x" exit $x --- -apiVersion: nnf.cray.hpe.com/v1alpha3 +apiVersion: nnf.cray.hpe.com/v1alpha4 kind: NnfContainerProfile metadata: name: example-forever @@ -79,7 +79,7 @@ data: - -c - "while true; do date && sleep 5; done" --- -apiVersion: nnf.cray.hpe.com/v1alpha3 +apiVersion: nnf.cray.hpe.com/v1alpha4 kind: NnfContainerProfile metadata: name: example-mpi @@ -117,7 +117,7 @@ data: image: nnf-mfu:latest --- -apiVersion: nnf.cray.hpe.com/v1alpha3 +apiVersion: nnf.cray.hpe.com/v1alpha4 kind: NnfContainerProfile metadata: name: example-mpi-fail @@ -145,7 +145,7 @@ data: - name: example-mpi-fail image: nnf-mfu:latest --- -apiVersion: nnf.cray.hpe.com/v1alpha3 +apiVersion: nnf.cray.hpe.com/v1alpha4 kind: NnfContainerProfile metadata: 
name: example-mpi-webserver diff --git a/config/examples/nnf_nnfdatamovementprofile.yaml b/config/examples/nnf_nnfdatamovementprofile.yaml index 45126ae8..ed153ee3 100644 --- a/config/examples/nnf_nnfdatamovementprofile.yaml +++ b/config/examples/nnf_nnfdatamovementprofile.yaml @@ -1,4 +1,4 @@ -apiVersion: nnf.cray.hpe.com/v1alpha3 +apiVersion: nnf.cray.hpe.com/v1alpha4 kind: NnfDataMovementProfile metadata: name: template diff --git a/config/examples/nnf_nnfstorageprofile.yaml b/config/examples/nnf_nnfstorageprofile.yaml index f693c728..74211b41 100644 --- a/config/examples/nnf_nnfstorageprofile.yaml +++ b/config/examples/nnf_nnfstorageprofile.yaml @@ -1,4 +1,4 @@ -apiVersion: nnf.cray.hpe.com/v1alpha3 +apiVersion: nnf.cray.hpe.com/v1alpha4 kind: NnfStorageProfile metadata: name: template diff --git a/config/ports/port_manager.yaml b/config/ports/port_manager.yaml index c4dc5884..854e1be6 100644 --- a/config/ports/port_manager.yaml +++ b/config/ports/port_manager.yaml @@ -1,4 +1,4 @@ -apiVersion: nnf.cray.hpe.com/v1alpha3 +apiVersion: nnf.cray.hpe.com/v1alpha4 kind: NnfPortManager metadata: name: port-manager diff --git a/github/cluster-api/util/conversion/conversion_test.go b/github/cluster-api/util/conversion/conversion_test.go index e8aab1eb..5d4271d9 100644 --- a/github/cluster-api/util/conversion/conversion_test.go +++ b/github/cluster-api/util/conversion/conversion_test.go @@ -19,7 +19,7 @@ package conversion import ( "testing" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" @@ -30,85 +30,85 @@ import ( var ( oldNnfAccessGVK = schema.GroupVersionKind{ - Group: nnfv1alpha3.GroupVersion.Group, + Group: nnfv1alpha4.GroupVersion.Group, Version: "v1old", Kind: "NnfAccess", } oldNnfContainerProfileGVK = schema.GroupVersionKind{ - Group: nnfv1alpha3.GroupVersion.Group, + Group: nnfv1alpha4.GroupVersion.Group, Version: "v1old", Kind: "NnfContainerProfile", } oldNnfDataMovementGVK = schema.GroupVersionKind{ - Group: nnfv1alpha3.GroupVersion.Group, + Group: nnfv1alpha4.GroupVersion.Group, Version: "v1old", Kind: "NnfDataMovement", } oldNnfDataMovementManagerGVK = schema.GroupVersionKind{ - Group: nnfv1alpha3.GroupVersion.Group, + Group: nnfv1alpha4.GroupVersion.Group, Version: "v1old", Kind: "NnfDataMovementManager", } oldNnfDataMovementProfileGVK = schema.GroupVersionKind{ - Group: nnfv1alpha3.GroupVersion.Group, + Group: nnfv1alpha4.GroupVersion.Group, Version: "v1old", Kind: "NnfDataMovementProfile", } oldNnfLustreMGTGVK = schema.GroupVersionKind{ - Group: nnfv1alpha3.GroupVersion.Group, + Group: nnfv1alpha4.GroupVersion.Group, Version: "v1old", Kind: "NnfLustreMGT", } oldNnfNodeGVK = schema.GroupVersionKind{ - Group: nnfv1alpha3.GroupVersion.Group, + Group: nnfv1alpha4.GroupVersion.Group, Version: "v1old", Kind: "NnfNode", } oldNnfNodeBlockStorageGVK = schema.GroupVersionKind{ - Group: nnfv1alpha3.GroupVersion.Group, + Group: nnfv1alpha4.GroupVersion.Group, Version: "v1old", Kind: "NnfNodeBlockStorage", } oldNnfNodeECDataGVK = schema.GroupVersionKind{ - Group: nnfv1alpha3.GroupVersion.Group, + Group: nnfv1alpha4.GroupVersion.Group, Version: "v1old", Kind: "NnfNodeECData", } oldNnfNodeStorageGVK = schema.GroupVersionKind{ - Group: nnfv1alpha3.GroupVersion.Group, + Group: nnfv1alpha4.GroupVersion.Group, Version: "v1old", Kind: "NnfNodeStorage", } oldNnfPortManagerGVK = schema.GroupVersionKind{ - Group: nnfv1alpha3.GroupVersion.Group, + Group: nnfv1alpha4.GroupVersion.Group, Version: "v1old", 
Kind: "NnfPortManager", } oldNnfStorageGVK = schema.GroupVersionKind{ - Group: nnfv1alpha3.GroupVersion.Group, + Group: nnfv1alpha4.GroupVersion.Group, Version: "v1old", Kind: "NnfStorage", } oldNnfStorageProfileGVK = schema.GroupVersionKind{ - Group: nnfv1alpha3.GroupVersion.Group, + Group: nnfv1alpha4.GroupVersion.Group, Version: "v1old", Kind: "NnfStorageProfile", } oldNnfSystemStorageGVK = schema.GroupVersionKind{ - Group: nnfv1alpha3.GroupVersion.Group, + Group: nnfv1alpha4.GroupVersion.Group, Version: "v1old", Kind: "NnfSystemStorage", } @@ -120,14 +120,14 @@ func TestMarshalData(t *testing.T) { g := NewWithT(t) t.Run("NnfAccess should write source object to destination", func(*testing.T) { - src := &nnfv1alpha3.NnfAccess{ + src := &nnfv1alpha4.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", Labels: map[string]string{ "label1": "", }, }, - Spec: nnfv1alpha3.NnfAccessSpec{ + Spec: nnfv1alpha4.NnfAccessSpec{ DesiredState: "mounted", UserID: 1551, GroupID: 2442, @@ -150,13 +150,13 @@ func TestMarshalData(t *testing.T) { }) t.Run("NnfAccess should append the annotation", func(*testing.T) { - src := &nnfv1alpha3.NnfAccess{ + src := &nnfv1alpha4.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, } dst := &unstructured.Unstructured{} - dst.SetGroupVersionKind(nnfv1alpha3.GroupVersion.WithKind("NnfAccess")) + dst.SetGroupVersionKind(nnfv1alpha4.GroupVersion.WithKind("NnfAccess")) dst.SetName("test-1") dst.SetAnnotations(map[string]string{ "annotation": "1", @@ -170,14 +170,14 @@ func TestMarshalData(t *testing.T) { prerun := int64(345) userid := uint32(7667) groupid := uint32(8448) - src := &nnfv1alpha3.NnfContainerProfile{ + src := &nnfv1alpha4.NnfContainerProfile{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", Labels: map[string]string{ "label1": "", }, }, - Data: nnfv1alpha3.NnfContainerProfileData{ + Data: nnfv1alpha4.NnfContainerProfileData{ PreRunTimeoutSeconds: &prerun, UserID: &userid, GroupID: &groupid, @@ -200,13 +200,13 @@ func 
TestMarshalData(t *testing.T) { }) t.Run("NnfContainerProfile should append the annotation", func(*testing.T) { - src := &nnfv1alpha3.NnfContainerProfile{ + src := &nnfv1alpha4.NnfContainerProfile{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, } dst := &unstructured.Unstructured{} - dst.SetGroupVersionKind(nnfv1alpha3.GroupVersion.WithKind("NnfContainerProfile")) + dst.SetGroupVersionKind(nnfv1alpha4.GroupVersion.WithKind("NnfContainerProfile")) dst.SetName("test-1") dst.SetAnnotations(map[string]string{ "annotation": "1", @@ -217,20 +217,20 @@ func TestMarshalData(t *testing.T) { }) t.Run("NnfDataMovement should write source object to destination", func(*testing.T) { - destpath := &nnfv1alpha3.NnfDataMovementSpecSourceDestination{ + destpath := &nnfv1alpha4.NnfDataMovementSpecSourceDestination{ Path: "little/red", } - srcpath := &nnfv1alpha3.NnfDataMovementSpecSourceDestination{ + srcpath := &nnfv1alpha4.NnfDataMovementSpecSourceDestination{ Path: "/dev/null", } - src := &nnfv1alpha3.NnfDataMovement{ + src := &nnfv1alpha4.NnfDataMovement{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", Labels: map[string]string{ "label1": "", }, }, - Spec: nnfv1alpha3.NnfDataMovementSpec{ + Spec: nnfv1alpha4.NnfDataMovementSpec{ Destination: destpath, Source: srcpath, }, @@ -251,13 +251,13 @@ func TestMarshalData(t *testing.T) { }) t.Run("NnfDataMovement should append the annotation", func(*testing.T) { - src := &nnfv1alpha3.NnfDataMovement{ + src := &nnfv1alpha4.NnfDataMovement{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, } dst := &unstructured.Unstructured{} - dst.SetGroupVersionKind(nnfv1alpha3.GroupVersion.WithKind("NnfDataMovement")) + dst.SetGroupVersionKind(nnfv1alpha4.GroupVersion.WithKind("NnfDataMovement")) dst.SetName("test-1") dst.SetAnnotations(map[string]string{ "annotation": "1", @@ -268,14 +268,14 @@ func TestMarshalData(t *testing.T) { }) t.Run("NnfDataMovementManager should write source object to destination", func(*testing.T) { - src := 
&nnfv1alpha3.NnfDataMovementManager{ + src := &nnfv1alpha4.NnfDataMovementManager{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", Labels: map[string]string{ "label1": "", }, }, - Spec: nnfv1alpha3.NnfDataMovementManagerSpec{ + Spec: nnfv1alpha4.NnfDataMovementManagerSpec{ HostPath: "/this/dir", MountPath: "/mnts", }, @@ -296,13 +296,13 @@ func TestMarshalData(t *testing.T) { }) t.Run("NnfDataMovementManager should append the annotation", func(*testing.T) { - src := &nnfv1alpha3.NnfDataMovementManager{ + src := &nnfv1alpha4.NnfDataMovementManager{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, } dst := &unstructured.Unstructured{} - dst.SetGroupVersionKind(nnfv1alpha3.GroupVersion.WithKind("NnfDataMovementManager")) + dst.SetGroupVersionKind(nnfv1alpha4.GroupVersion.WithKind("NnfDataMovementManager")) dst.SetName("test-1") dst.SetAnnotations(map[string]string{ "annotation": "1", @@ -313,14 +313,14 @@ func TestMarshalData(t *testing.T) { }) t.Run("NnfDataMovementProfile should write source object to destination", func(*testing.T) { - src := &nnfv1alpha3.NnfDataMovementProfile{ + src := &nnfv1alpha4.NnfDataMovementProfile{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", Labels: map[string]string{ "label1": "", }, }, - Data: nnfv1alpha3.NnfDataMovementProfileData{ + Data: nnfv1alpha4.NnfDataMovementProfileData{ Command: "mpirun is cool", StatCommand: "stat --something", }, @@ -341,13 +341,13 @@ func TestMarshalData(t *testing.T) { }) t.Run("NnfDataMovementProfile should append the annotation", func(*testing.T) { - src := &nnfv1alpha3.NnfDataMovementProfile{ + src := &nnfv1alpha4.NnfDataMovementProfile{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, } dst := &unstructured.Unstructured{} - dst.SetGroupVersionKind(nnfv1alpha3.GroupVersion.WithKind("NnfDataMovementProfile")) + dst.SetGroupVersionKind(nnfv1alpha4.GroupVersion.WithKind("NnfDataMovementProfile")) dst.SetName("test-1") dst.SetAnnotations(map[string]string{ "annotation": "1", @@ -359,14 +359,14 @@ func 
TestMarshalData(t *testing.T) { t.Run("NnfLustreMGT should write source object to destination", func(*testing.T) { blacklist := []string{"black-fly", "black bird"} - src := &nnfv1alpha3.NnfLustreMGT{ + src := &nnfv1alpha4.NnfLustreMGT{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", Labels: map[string]string{ "label1": "", }, }, - Spec: nnfv1alpha3.NnfLustreMGTSpec{ + Spec: nnfv1alpha4.NnfLustreMGTSpec{ FsNameStart: "aaaa-pizza", FsNameBlackList: blacklist, }, @@ -388,13 +388,13 @@ func TestMarshalData(t *testing.T) { }) t.Run("NnfLustreMGT should append the annotation", func(*testing.T) { - src := &nnfv1alpha3.NnfLustreMGT{ + src := &nnfv1alpha4.NnfLustreMGT{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, } dst := &unstructured.Unstructured{} - dst.SetGroupVersionKind(nnfv1alpha3.GroupVersion.WithKind("NnfLustreMGT")) + dst.SetGroupVersionKind(nnfv1alpha4.GroupVersion.WithKind("NnfLustreMGT")) dst.SetName("test-1") dst.SetAnnotations(map[string]string{ "annotation": "1", @@ -405,14 +405,14 @@ func TestMarshalData(t *testing.T) { }) t.Run("NnfNode should write source object to destination", func(*testing.T) { - src := &nnfv1alpha3.NnfNode{ + src := &nnfv1alpha4.NnfNode{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", Labels: map[string]string{ "label1": "", }, }, - Spec: nnfv1alpha3.NnfNodeSpec{ + Spec: nnfv1alpha4.NnfNodeSpec{ Name: "rabbit-1", Pod: "nnf-thingy-122", }, @@ -433,13 +433,13 @@ func TestMarshalData(t *testing.T) { }) t.Run("NnfNode should append the annotation", func(*testing.T) { - src := &nnfv1alpha3.NnfNode{ + src := &nnfv1alpha4.NnfNode{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, } dst := &unstructured.Unstructured{} - dst.SetGroupVersionKind(nnfv1alpha3.GroupVersion.WithKind("NnfNode")) + dst.SetGroupVersionKind(nnfv1alpha4.GroupVersion.WithKind("NnfNode")) dst.SetName("test-1") dst.SetAnnotations(map[string]string{ "annotation": "1", @@ -450,17 +450,17 @@ func TestMarshalData(t *testing.T) { }) t.Run("NnfNodeBlockStorage should write 
source object to destination", func(*testing.T) { - alloc := []nnfv1alpha3.NnfNodeBlockStorageAllocationSpec{ + alloc := []nnfv1alpha4.NnfNodeBlockStorageAllocationSpec{ {Access: []string{"rabbit-44", "rabbit-10002"}}, } - src := &nnfv1alpha3.NnfNodeBlockStorage{ + src := &nnfv1alpha4.NnfNodeBlockStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", Labels: map[string]string{ "label1": "", }, }, - Spec: nnfv1alpha3.NnfNodeBlockStorageSpec{ + Spec: nnfv1alpha4.NnfNodeBlockStorageSpec{ Allocations: alloc, }, } @@ -480,13 +480,13 @@ func TestMarshalData(t *testing.T) { }) t.Run("NnfNodeBlockStorage should append the annotation", func(*testing.T) { - src := &nnfv1alpha3.NnfNodeBlockStorage{ + src := &nnfv1alpha4.NnfNodeBlockStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, } dst := &unstructured.Unstructured{} - dst.SetGroupVersionKind(nnfv1alpha3.GroupVersion.WithKind("NnfNodeBlockStorage")) + dst.SetGroupVersionKind(nnfv1alpha4.GroupVersion.WithKind("NnfNodeBlockStorage")) dst.SetName("test-1") dst.SetAnnotations(map[string]string{ "annotation": "1", @@ -497,18 +497,18 @@ func TestMarshalData(t *testing.T) { }) t.Run("NnfNodeECData should write source object to destination", func(*testing.T) { - elem1 := nnfv1alpha3.NnfNodeECPrivateData{"element1": "the world"} - priv := map[string]nnfv1alpha3.NnfNodeECPrivateData{ + elem1 := nnfv1alpha4.NnfNodeECPrivateData{"element1": "the world"} + priv := map[string]nnfv1alpha4.NnfNodeECPrivateData{ "thing1": elem1, } - src := &nnfv1alpha3.NnfNodeECData{ + src := &nnfv1alpha4.NnfNodeECData{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", Labels: map[string]string{ "label1": "", }, }, - Status: nnfv1alpha3.NnfNodeECDataStatus{ + Status: nnfv1alpha4.NnfNodeECDataStatus{ Data: priv, }, } @@ -529,13 +529,13 @@ func TestMarshalData(t *testing.T) { }) t.Run("NnfNodeECData should append the annotation", func(*testing.T) { - src := &nnfv1alpha3.NnfNodeECData{ + src := &nnfv1alpha4.NnfNodeECData{ ObjectMeta: metav1.ObjectMeta{ 
Name: "test-1", }, } dst := &unstructured.Unstructured{} - dst.SetGroupVersionKind(nnfv1alpha3.GroupVersion.WithKind("NnfNodeECData")) + dst.SetGroupVersionKind(nnfv1alpha4.GroupVersion.WithKind("NnfNodeECData")) dst.SetName("test-1") dst.SetAnnotations(map[string]string{ "annotation": "1", @@ -546,14 +546,14 @@ func TestMarshalData(t *testing.T) { }) t.Run("NnfNodeStorage should write source object to destination", func(*testing.T) { - src := &nnfv1alpha3.NnfNodeStorage{ + src := &nnfv1alpha4.NnfNodeStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", Labels: map[string]string{ "label1": "", }, }, - Spec: nnfv1alpha3.NnfNodeStorageSpec{ + Spec: nnfv1alpha4.NnfNodeStorageSpec{ UserID: 4997, GroupID: 2112, FileSystemType: "gfs2", @@ -576,13 +576,13 @@ func TestMarshalData(t *testing.T) { }) t.Run("NnfNodeStorage should append the annotation", func(*testing.T) { - src := &nnfv1alpha3.NnfNodeStorage{ + src := &nnfv1alpha4.NnfNodeStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, } dst := &unstructured.Unstructured{} - dst.SetGroupVersionKind(nnfv1alpha3.GroupVersion.WithKind("NnfNodeStorage")) + dst.SetGroupVersionKind(nnfv1alpha4.GroupVersion.WithKind("NnfNodeStorage")) dst.SetName("test-1") dst.SetAnnotations(map[string]string{ "annotation": "1", @@ -593,14 +593,14 @@ func TestMarshalData(t *testing.T) { }) t.Run("NnfPortManager should write source object to destination", func(*testing.T) { - src := &nnfv1alpha3.NnfPortManager{ + src := &nnfv1alpha4.NnfPortManager{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", Labels: map[string]string{ "label1": "", }, }, - Spec: nnfv1alpha3.NnfPortManagerSpec{ + Spec: nnfv1alpha4.NnfPortManagerSpec{ SystemConfiguration: corev1.ObjectReference{ Namespace: "willy-wonka", Name: "candy-land", @@ -623,13 +623,13 @@ func TestMarshalData(t *testing.T) { }) t.Run("NnfPortManager should append the annotation", func(*testing.T) { - src := &nnfv1alpha3.NnfPortManager{ + src := &nnfv1alpha4.NnfPortManager{ ObjectMeta: 
metav1.ObjectMeta{ Name: "test-1", }, } dst := &unstructured.Unstructured{} - dst.SetGroupVersionKind(nnfv1alpha3.GroupVersion.WithKind("NnfPortManager")) + dst.SetGroupVersionKind(nnfv1alpha4.GroupVersion.WithKind("NnfPortManager")) dst.SetName("test-1") dst.SetAnnotations(map[string]string{ "annotation": "1", @@ -640,14 +640,14 @@ func TestMarshalData(t *testing.T) { }) t.Run("NnfStorage should write source object to destination", func(*testing.T) { - src := &nnfv1alpha3.NnfStorage{ + src := &nnfv1alpha4.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", Labels: map[string]string{ "label1": "", }, }, - Spec: nnfv1alpha3.NnfStorageSpec{ + Spec: nnfv1alpha4.NnfStorageSpec{ FileSystemType: "gfs2", UserID: 4004, GroupID: 2992, @@ -670,13 +670,13 @@ func TestMarshalData(t *testing.T) { }) t.Run("NnfStorage should append the annotation", func(*testing.T) { - src := &nnfv1alpha3.NnfStorage{ + src := &nnfv1alpha4.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, } dst := &unstructured.Unstructured{} - dst.SetGroupVersionKind(nnfv1alpha3.GroupVersion.WithKind("NnfStorage")) + dst.SetGroupVersionKind(nnfv1alpha4.GroupVersion.WithKind("NnfStorage")) dst.SetName("test-1") dst.SetAnnotations(map[string]string{ "annotation": "1", @@ -687,15 +687,15 @@ func TestMarshalData(t *testing.T) { }) t.Run("NnfStorageProfile should write source object to destination", func(*testing.T) { - src := &nnfv1alpha3.NnfStorageProfile{ + src := &nnfv1alpha4.NnfStorageProfile{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", Labels: map[string]string{ "label1": "", }, }, - Data: nnfv1alpha3.NnfStorageProfileData{ - LustreStorage: nnfv1alpha3.NnfStorageProfileLustreData{ + Data: nnfv1alpha4.NnfStorageProfileData{ + LustreStorage: nnfv1alpha4.NnfStorageProfileLustreData{ ExternalMGS: "kfi@1:this@that", }, }, @@ -715,13 +715,13 @@ func TestMarshalData(t *testing.T) { }) t.Run("NnfStorageProfile should append the annotation", func(*testing.T) { - src := 
&nnfv1alpha3.NnfStorageProfile{ + src := &nnfv1alpha4.NnfStorageProfile{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, } dst := &unstructured.Unstructured{} - dst.SetGroupVersionKind(nnfv1alpha3.GroupVersion.WithKind("NnfStorageProfile")) + dst.SetGroupVersionKind(nnfv1alpha4.GroupVersion.WithKind("NnfStorageProfile")) dst.SetName("test-1") dst.SetAnnotations(map[string]string{ "annotation": "1", @@ -732,14 +732,14 @@ func TestMarshalData(t *testing.T) { }) t.Run("NnfSystemStorage should write source object to destination", func(*testing.T) { - src := &nnfv1alpha3.NnfSystemStorage{ + src := &nnfv1alpha4.NnfSystemStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", Labels: map[string]string{ "label1": "", }, }, - Spec: nnfv1alpha3.NnfSystemStorageSpec{ + Spec: nnfv1alpha4.NnfSystemStorageSpec{ ClientMountPath: "/on/this", }, } @@ -758,13 +758,13 @@ func TestMarshalData(t *testing.T) { }) t.Run("NnfSystemStorage should append the annotation", func(*testing.T) { - src := &nnfv1alpha3.NnfSystemStorage{ + src := &nnfv1alpha4.NnfSystemStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, } dst := &unstructured.Unstructured{} - dst.SetGroupVersionKind(nnfv1alpha3.GroupVersion.WithKind("NnfSystemStorage")) + dst.SetGroupVersionKind(nnfv1alpha4.GroupVersion.WithKind("NnfSystemStorage")) dst.SetName("test-1") dst.SetAnnotations(map[string]string{ "annotation": "1", @@ -781,7 +781,7 @@ func TestUnmarshalData(t *testing.T) { g := NewWithT(t) t.Run("NnfAccess should return false without errors if annotation doesn't exist", func(*testing.T) { - src := &nnfv1alpha3.NnfAccess{ + src := &nnfv1alpha4.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -803,7 +803,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfAccess{ + dst := &nnfv1alpha4.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, 
@@ -828,7 +828,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfAccess{ + dst := &nnfv1alpha4.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -843,7 +843,7 @@ func TestUnmarshalData(t *testing.T) { }) t.Run("NnfContainerProfile should return false without errors if annotation doesn't exist", func(*testing.T) { - src := &nnfv1alpha3.NnfContainerProfile{ + src := &nnfv1alpha4.NnfContainerProfile{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -865,7 +865,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfContainerProfile{ + dst := &nnfv1alpha4.NnfContainerProfile{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -890,7 +890,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfContainerProfile{ + dst := &nnfv1alpha4.NnfContainerProfile{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -905,7 +905,7 @@ func TestUnmarshalData(t *testing.T) { }) t.Run("NnfDataMovement should return false without errors if annotation doesn't exist", func(*testing.T) { - src := &nnfv1alpha3.NnfDataMovement{ + src := &nnfv1alpha4.NnfDataMovement{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -927,7 +927,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfDataMovement{ + dst := &nnfv1alpha4.NnfDataMovement{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -952,7 +952,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: 
"{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfDataMovement{ + dst := &nnfv1alpha4.NnfDataMovement{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -967,7 +967,7 @@ func TestUnmarshalData(t *testing.T) { }) t.Run("NnfDataMovementManager should return false without errors if annotation doesn't exist", func(*testing.T) { - src := &nnfv1alpha3.NnfDataMovementManager{ + src := &nnfv1alpha4.NnfDataMovementManager{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -989,7 +989,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfDataMovementManager{ + dst := &nnfv1alpha4.NnfDataMovementManager{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1014,7 +1014,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfDataMovementManager{ + dst := &nnfv1alpha4.NnfDataMovementManager{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1029,7 +1029,7 @@ func TestUnmarshalData(t *testing.T) { }) t.Run("NnfDataMovementProfile should return false without errors if annotation doesn't exist", func(*testing.T) { - src := &nnfv1alpha3.NnfDataMovementProfile{ + src := &nnfv1alpha4.NnfDataMovementProfile{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1051,7 +1051,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfDataMovementProfile{ + dst := &nnfv1alpha4.NnfDataMovementProfile{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1076,7 +1076,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: 
"{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfDataMovementProfile{ + dst := &nnfv1alpha4.NnfDataMovementProfile{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1091,7 +1091,7 @@ func TestUnmarshalData(t *testing.T) { }) t.Run("NnfLustreMGT should return false without errors if annotation doesn't exist", func(*testing.T) { - src := &nnfv1alpha3.NnfLustreMGT{ + src := &nnfv1alpha4.NnfLustreMGT{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1113,7 +1113,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfLustreMGT{ + dst := &nnfv1alpha4.NnfLustreMGT{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1138,7 +1138,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfLustreMGT{ + dst := &nnfv1alpha4.NnfLustreMGT{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1153,7 +1153,7 @@ func TestUnmarshalData(t *testing.T) { }) t.Run("NnfNode should return false without errors if annotation doesn't exist", func(*testing.T) { - src := &nnfv1alpha3.NnfNode{ + src := &nnfv1alpha4.NnfNode{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1175,7 +1175,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfNode{ + dst := &nnfv1alpha4.NnfNode{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1200,7 +1200,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - 
dst := &nnfv1alpha3.NnfNode{ + dst := &nnfv1alpha4.NnfNode{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1215,7 +1215,7 @@ func TestUnmarshalData(t *testing.T) { }) t.Run("NnfNodeBlockStorage should return false without errors if annotation doesn't exist", func(*testing.T) { - src := &nnfv1alpha3.NnfNodeBlockStorage{ + src := &nnfv1alpha4.NnfNodeBlockStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1237,7 +1237,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfNodeBlockStorage{ + dst := &nnfv1alpha4.NnfNodeBlockStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1262,7 +1262,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfNodeBlockStorage{ + dst := &nnfv1alpha4.NnfNodeBlockStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1277,7 +1277,7 @@ func TestUnmarshalData(t *testing.T) { }) t.Run("NnfNodeECData should return false without errors if annotation doesn't exist", func(*testing.T) { - src := &nnfv1alpha3.NnfNodeECData{ + src := &nnfv1alpha4.NnfNodeECData{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1299,7 +1299,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfNodeECData{ + dst := &nnfv1alpha4.NnfNodeECData{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1324,7 +1324,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfNodeECData{ + dst := &nnfv1alpha4.NnfNodeECData{ 
ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1339,7 +1339,7 @@ func TestUnmarshalData(t *testing.T) { }) t.Run("NnfNodeStorage should return false without errors if annotation doesn't exist", func(*testing.T) { - src := &nnfv1alpha3.NnfNodeStorage{ + src := &nnfv1alpha4.NnfNodeStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1361,7 +1361,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfNodeStorage{ + dst := &nnfv1alpha4.NnfNodeStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1386,7 +1386,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfNodeStorage{ + dst := &nnfv1alpha4.NnfNodeStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1401,7 +1401,7 @@ func TestUnmarshalData(t *testing.T) { }) t.Run("NnfPortManager should return false without errors if annotation doesn't exist", func(*testing.T) { - src := &nnfv1alpha3.NnfPortManager{ + src := &nnfv1alpha4.NnfPortManager{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1423,7 +1423,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfPortManager{ + dst := &nnfv1alpha4.NnfPortManager{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1448,7 +1448,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfPortManager{ + dst := &nnfv1alpha4.NnfPortManager{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1463,7 +1463,7 @@ func 
TestUnmarshalData(t *testing.T) { }) t.Run("NnfStorage should return false without errors if annotation doesn't exist", func(*testing.T) { - src := &nnfv1alpha3.NnfStorage{ + src := &nnfv1alpha4.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1485,7 +1485,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfStorage{ + dst := &nnfv1alpha4.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1510,7 +1510,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfStorage{ + dst := &nnfv1alpha4.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1525,7 +1525,7 @@ func TestUnmarshalData(t *testing.T) { }) t.Run("NnfStorageProfile should return false without errors if annotation doesn't exist", func(*testing.T) { - src := &nnfv1alpha3.NnfStorageProfile{ + src := &nnfv1alpha4.NnfStorageProfile{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1547,7 +1547,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfStorageProfile{ + dst := &nnfv1alpha4.NnfStorageProfile{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1572,7 +1572,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfStorageProfile{ + dst := &nnfv1alpha4.NnfStorageProfile{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1587,7 +1587,7 @@ func TestUnmarshalData(t *testing.T) { }) t.Run("NnfSystemStorage should return false without errors if 
annotation doesn't exist", func(*testing.T) { - src := &nnfv1alpha3.NnfSystemStorage{ + src := &nnfv1alpha4.NnfSystemStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1609,7 +1609,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfSystemStorage{ + dst := &nnfv1alpha4.NnfSystemStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, @@ -1634,7 +1634,7 @@ func TestUnmarshalData(t *testing.T) { DataAnnotation: "{\"metadata\":{\"name\":\"test-1\",\"creationTimestamp\":null,\"labels\":{\"label1\":\"\"}},\"spec\":{},\"status\":{}}", }) - dst := &nnfv1alpha3.NnfSystemStorage{ + dst := &nnfv1alpha4.NnfSystemStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", }, diff --git a/internal/controller/directivebreakdown_controller.go b/internal/controller/directivebreakdown_controller.go index fd3e1dfa..437c9496 100644 --- a/internal/controller/directivebreakdown_controller.go +++ b/internal/controller/directivebreakdown_controller.go @@ -43,7 +43,7 @@ import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/dwdparse" "github.com/DataWorkflowServices/dws/utils/updater" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" ) @@ -624,7 +624,7 @@ func populateStorageAllocationSet(a *dwsv1alpha2.StorageAllocationSet, strategy func (r *DirectiveBreakdownReconciler) SetupWithManager(mgr ctrl.Manager) error { r.ChildObjects = []dwsv1alpha2.ObjectList{ &dwsv1alpha2.ServersList{}, - &nnfv1alpha3.NnfStorageProfileList{}, + &nnfv1alpha4.NnfStorageProfileList{}, &dwsv1alpha2.PersistentStorageInstanceList{}, } @@ -634,6 +634,6 @@ func (r *DirectiveBreakdownReconciler) SetupWithManager(mgr ctrl.Manager) error 
For(&dwsv1alpha2.DirectiveBreakdown{}). Owns(&dwsv1alpha2.Servers{}). Owns(&dwsv1alpha2.PersistentStorageInstance{}). - Owns(&nnfv1alpha3.NnfStorageProfile{}). + Owns(&nnfv1alpha4.NnfStorageProfile{}). Complete(r) } diff --git a/internal/controller/directivebreakdown_controller_test.go b/internal/controller/directivebreakdown_controller_test.go index e489f690..1600b310 100644 --- a/internal/controller/directivebreakdown_controller_test.go +++ b/internal/controller/directivebreakdown_controller_test.go @@ -30,12 +30,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" ) var _ = Describe("DirectiveBreakdown test", func() { var ( - storageProfile *nnfv1alpha3.NnfStorageProfile + storageProfile *nnfv1alpha4.NnfStorageProfile ) BeforeEach(func() { @@ -45,7 +45,7 @@ var _ = Describe("DirectiveBreakdown test", func() { AfterEach(func() { Expect(k8sClient.Delete(context.TODO(), storageProfile)).To(Succeed()) - profExpected := &nnfv1alpha3.NnfStorageProfile{} + profExpected := &nnfv1alpha4.NnfStorageProfile{} Eventually(func() error { // Delete can still return the cached object. 
Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storageProfile), profExpected) }).ShouldNot(Succeed()) @@ -81,7 +81,7 @@ var _ = Describe("DirectiveBreakdown test", func() { return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(servers), servers) }).Should(Succeed(), "Create the DWS Servers Resource") - pinnedStorageProfile := &nnfv1alpha3.NnfStorageProfile{ + pinnedStorageProfile := &nnfv1alpha4.NnfStorageProfile{ ObjectMeta: metav1.ObjectMeta{ Name: directiveBreakdown.GetName(), Namespace: directiveBreakdown.GetNamespace(), diff --git a/internal/controller/dws_servers_controller.go b/internal/controller/dws_servers_controller.go index 5a601931..163db61e 100644 --- a/internal/controller/dws_servers_controller.go +++ b/internal/controller/dws_servers_controller.go @@ -42,7 +42,7 @@ import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/updater" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" ) @@ -170,7 +170,7 @@ func (r *DWSServersReconciler) updateCapacityUsed(ctx context.Context, servers * // Get the NnfStorage with the same name/namespace as the servers resource. It may not exist // yet if we're still in proposal phase, or if it was deleted in teardown. 
- nnfStorage := &nnfv1alpha3.NnfStorage{} + nnfStorage := &nnfv1alpha4.NnfStorage{} if err := r.Get(ctx, types.NamespacedName{Name: servers.Name, Namespace: servers.Namespace}, nnfStorage); err != nil { if apierrors.IsNotFound(err) { return r.statusSetEmpty(ctx, servers) @@ -222,13 +222,13 @@ func (r *DWSServersReconciler) updateCapacityUsed(ctx context.Context, servers * // Loop through the nnfNodeStorages corresponding to each of the Rabbit nodes and find matchLabels := dwsv1alpha2.MatchingOwner(nnfStorage) - matchLabels[nnfv1alpha3.AllocationSetLabel] = label + matchLabels[nnfv1alpha4.AllocationSetLabel] = label listOptions := []client.ListOption{ matchLabels, } - nnfNodeBlockStorageList := &nnfv1alpha3.NnfNodeBlockStorageList{} + nnfNodeBlockStorageList := &nnfv1alpha4.NnfNodeBlockStorageList{} if err := r.List(ctx, nnfNodeBlockStorageList, listOptions...); err != nil { return ctrl.Result{}, err } @@ -363,7 +363,7 @@ func (r *DWSServersReconciler) checkDeletedStorage(ctx context.Context, servers log := r.Log.WithValues("Servers", types.NamespacedName{Name: servers.Name, Namespace: servers.Namespace}) // Get the NnfStorage with the same name/namespace as the servers resource - nnfStorage := &nnfv1alpha3.NnfStorage{} + nnfStorage := &nnfv1alpha4.NnfStorage{} if err := r.Get(ctx, types.NamespacedName{Name: servers.Name, Namespace: servers.Namespace}, nnfStorage); err != nil { if apierrors.IsNotFound(err) { log.Info("NnfStorage is deleted") @@ -394,6 +394,6 @@ func (r *DWSServersReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). WithOptions(controller.Options{MaxConcurrentReconciles: maxReconciles}). For(&dwsv1alpha2.Servers{}). - Watches(&nnfv1alpha3.NnfStorage{}, handler.EnqueueRequestsFromMapFunc(nnfStorageServersMapFunc)). + Watches(&nnfv1alpha4.NnfStorage{}, handler.EnqueueRequestsFromMapFunc(nnfStorageServersMapFunc)). 
Complete(r) } diff --git a/internal/controller/dws_storage_controller.go b/internal/controller/dws_storage_controller.go index bdfae142..787d33e0 100644 --- a/internal/controller/dws_storage_controller.go +++ b/internal/controller/dws_storage_controller.go @@ -38,7 +38,7 @@ import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/updater" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" ) type DWSStorageReconciler struct { @@ -103,7 +103,7 @@ func (r *DWSStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) } // Ensure the storage resource is updated with the latest NNF Node resource status - nnfNode := &nnfv1alpha3.NnfNode{ + nnfNode := &nnfv1alpha4.NnfNode{ ObjectMeta: metav1.ObjectMeta{ Name: "nnf-nlc", Namespace: storage.GetName(), @@ -155,7 +155,7 @@ func (r *DWSStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) device.Slot = drive.Slot device.Status = drive.Status.ConvertToDWSResourceStatus() - if drive.Status == nnfv1alpha3.ResourceReady { + if drive.Status == nnfv1alpha4.ResourceReady { wearLevel := drive.WearLevel device.Model = drive.Model device.SerialNumber = drive.SerialNumber @@ -273,7 +273,7 @@ func (r *DWSStorageReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&dwsv1alpha2.Storage{}). - Watches(&nnfv1alpha3.NnfNode{}, handler.EnqueueRequestsFromMapFunc(nnfNodeMapFunc)). + Watches(&nnfv1alpha4.NnfNode{}, handler.EnqueueRequestsFromMapFunc(nnfNodeMapFunc)). Watches(&corev1.Node{}, handler.EnqueueRequestsFromMapFunc(nodeMapFunc)). 
Complete(r) } diff --git a/internal/controller/filesystem_helpers.go b/internal/controller/filesystem_helpers.go index aafc352e..e81dc58f 100644 --- a/internal/controller/filesystem_helpers.go +++ b/internal/controller/filesystem_helpers.go @@ -35,14 +35,14 @@ import ( "github.com/go-logr/logr" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" ) //+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfnodestorages,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfnodestorages/finalizers,verbs=update //+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfstorageprofiles,verbs=get;create;list;watch;update;patch;delete;deletecollection -func getBlockDeviceAndFileSystemForKind(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha3.NnfNodeStorage, index int, log logr.Logger) (blockdevice.BlockDevice, filesystem.FileSystem, error) { +func getBlockDeviceAndFileSystemForKind(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha4.NnfNodeStorage, index int, log logr.Logger) (blockdevice.BlockDevice, filesystem.FileSystem, error) { blockDevice, err := newMockBlockDevice(ctx, c, nnfNodeStorage, index, log) if err != nil { @@ -58,7 +58,7 @@ func getBlockDeviceAndFileSystemForKind(ctx context.Context, c client.Client, nn } // getBlockDeviceAndFileSystem returns blockdevice and filesystem interfaces based on the allocation type and NnfStorageProfile. 
-func getBlockDeviceAndFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha3.NnfNodeStorage, index int, log logr.Logger) (blockdevice.BlockDevice, filesystem.FileSystem, error) { +func getBlockDeviceAndFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha4.NnfNodeStorage, index int, log logr.Logger) (blockdevice.BlockDevice, filesystem.FileSystem, error) { _, found := os.LookupEnv("NNF_TEST_ENVIRONMENT") if found || os.Getenv("ENVIRONMENT") == "kind" { return getBlockDeviceAndFileSystemForKind(ctx, c, nnfNodeStorage, index, log) @@ -107,7 +107,7 @@ func getBlockDeviceAndFileSystem(ctx context.Context, c client.Client, nnfNodeSt return blockDevice, fileSystem, nil case "lustre": - commandLines := nnfv1alpha3.NnfStorageProfileLustreCmdLines{} + commandLines := nnfv1alpha4.NnfStorageProfileLustreCmdLines{} switch nnfNodeStorage.Spec.LustreStorage.TargetType { case "mgt": @@ -151,7 +151,7 @@ func getBlockDeviceAndFileSystem(ctx context.Context, c client.Client, nnfNodeSt return nil, nil, dwsv1alpha2.NewResourceError("unsupported file system type %s", nnfNodeStorage.Spec.FileSystemType).WithMajor() } -func isNodeBlockStorageCurrent(ctx context.Context, c client.Client, nnfNodeBlockStorage *nnfv1alpha3.NnfNodeBlockStorage) (bool, error) { +func isNodeBlockStorageCurrent(ctx context.Context, c client.Client, nnfNodeBlockStorage *nnfv1alpha4.NnfNodeBlockStorage) (bool, error) { if _, found := os.LookupEnv("NNF_TEST_ENVIRONMENT"); found { return true, nil } @@ -188,15 +188,15 @@ func isNodeBlockStorageCurrent(ctx context.Context, c client.Client, nnfNodeBloc return false, nil } -func newZpoolBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha3.NnfNodeStorage, cmdLines nnfv1alpha3.NnfStorageProfileLustreCmdLines, index int, log logr.Logger) (blockdevice.BlockDevice, error) { +func newZpoolBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha4.NnfNodeStorage, cmdLines 
nnfv1alpha4.NnfStorageProfileLustreCmdLines, index int, log logr.Logger) (blockdevice.BlockDevice, error) { zpool := blockdevice.Zpool{} // This is for the fake NnfNodeStorage case. We don't need to create the zpool BlockDevice - if nnfNodeStorage.Spec.BlockReference.Kind != reflect.TypeOf(nnfv1alpha3.NnfNodeBlockStorage{}).Name() { + if nnfNodeStorage.Spec.BlockReference.Kind != reflect.TypeOf(nnfv1alpha4.NnfNodeBlockStorage{}).Name() { return newMockBlockDevice(ctx, c, nnfNodeStorage, index, log) } - nnfNodeBlockStorage := &nnfv1alpha3.NnfNodeBlockStorage{ + nnfNodeBlockStorage := &nnfv1alpha4.NnfNodeBlockStorage{ ObjectMeta: metav1.ObjectMeta{ Name: nnfNodeStorage.GetName(), Namespace: nnfNodeStorage.GetNamespace(), @@ -237,7 +237,7 @@ func newZpoolBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *n return &zpool, nil } -func newLvmBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha3.NnfNodeStorage, cmdLines nnfv1alpha3.NnfStorageProfileCmdLines, index int, log logr.Logger) (blockdevice.BlockDevice, error) { +func newLvmBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha4.NnfNodeStorage, cmdLines nnfv1alpha4.NnfStorageProfileCmdLines, index int, log logr.Logger) (blockdevice.BlockDevice, error) { lvmDesc := blockdevice.Lvm{} devices := []string{} @@ -246,8 +246,8 @@ func newLvmBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *nnf blockIndex = 0 } - if nnfNodeStorage.Spec.BlockReference.Kind == reflect.TypeOf(nnfv1alpha3.NnfNodeBlockStorage{}).Name() { - nnfNodeBlockStorage := &nnfv1alpha3.NnfNodeBlockStorage{ + if nnfNodeStorage.Spec.BlockReference.Kind == reflect.TypeOf(nnfv1alpha4.NnfNodeBlockStorage{}).Name() { + nnfNodeBlockStorage := &nnfv1alpha4.NnfNodeBlockStorage{ ObjectMeta: metav1.ObjectMeta{ Name: nnfNodeStorage.GetName(), Namespace: nnfNodeStorage.GetNamespace(), @@ -330,7 +330,7 @@ func newLvmBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *nnf return 
&lvmDesc, nil } -func newMockBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha3.NnfNodeStorage, index int, log logr.Logger) (blockdevice.BlockDevice, error) { +func newMockBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha4.NnfNodeStorage, index int, log logr.Logger) (blockdevice.BlockDevice, error) { blockDevice := blockdevice.MockBlockDevice{ Log: log, } @@ -338,7 +338,7 @@ func newMockBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *nn return &blockDevice, nil } -func newBindFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha3.NnfNodeStorage, cmdLines nnfv1alpha3.NnfStorageProfileCmdLines, blockDevice blockdevice.BlockDevice, index int, log logr.Logger) (filesystem.FileSystem, error) { +func newBindFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha4.NnfNodeStorage, cmdLines nnfv1alpha4.NnfStorageProfileCmdLines, blockDevice blockdevice.BlockDevice, index int, log logr.Logger) (filesystem.FileSystem, error) { fs := filesystem.SimpleFileSystem{} fs.Log = log @@ -358,7 +358,7 @@ func newBindFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnf return &fs, nil } -func newGfs2FileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha3.NnfNodeStorage, cmdLines nnfv1alpha3.NnfStorageProfileCmdLines, blockDevice blockdevice.BlockDevice, index int, log logr.Logger) (filesystem.FileSystem, error) { +func newGfs2FileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha4.NnfNodeStorage, cmdLines nnfv1alpha4.NnfStorageProfileCmdLines, blockDevice blockdevice.BlockDevice, index int, log logr.Logger) (filesystem.FileSystem, error) { fs := filesystem.SimpleFileSystem{} fs.Log = log @@ -386,7 +386,7 @@ func newGfs2FileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnf return &fs, nil } -func newXfsFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha3.NnfNodeStorage, cmdLines 
nnfv1alpha3.NnfStorageProfileCmdLines, blockDevice blockdevice.BlockDevice, index int, log logr.Logger) (filesystem.FileSystem, error) { +func newXfsFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha4.NnfNodeStorage, cmdLines nnfv1alpha4.NnfStorageProfileCmdLines, blockDevice blockdevice.BlockDevice, index int, log logr.Logger) (filesystem.FileSystem, error) { fs := filesystem.SimpleFileSystem{} fs.Log = log @@ -411,7 +411,7 @@ func newXfsFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv return &fs, nil } -func newLustreFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha3.NnfNodeStorage, cmdLines nnfv1alpha3.NnfStorageProfileLustreCmdLines, mountCommand string, blockDevice blockdevice.BlockDevice, index int, log logr.Logger) (filesystem.FileSystem, error) { +func newLustreFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha4.NnfNodeStorage, cmdLines nnfv1alpha4.NnfStorageProfileLustreCmdLines, mountCommand string, blockDevice blockdevice.BlockDevice, index int, log logr.Logger) (filesystem.FileSystem, error) { fs := filesystem.LustreFileSystem{} targetPath, err := lustreTargetPath(ctx, c, nnfNodeStorage, nnfNodeStorage.Spec.LustreStorage.TargetType, nnfNodeStorage.Spec.LustreStorage.StartIndex+index) @@ -441,7 +441,7 @@ func newLustreFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *n return &fs, nil } -func newMockFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha3.NnfNodeStorage, blockDevice blockdevice.BlockDevice, index int, log logr.Logger) (filesystem.FileSystem, error) { +func newMockFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha4.NnfNodeStorage, blockDevice blockdevice.BlockDevice, index int, log logr.Logger) (filesystem.FileSystem, error) { path := os.Getenv("MOCK_FILE_SYSTEM_PATH") if len(path) == 0 { path = "/mnt/filesystems" @@ -455,7 +455,7 @@ func newMockFileSystem(ctx context.Context, c 
client.Client, nnfNodeStorage *nnf return &fs, nil } -func lustreTargetPath(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha3.NnfNodeStorage, targetType string, index int) (string, error) { +func lustreTargetPath(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha4.NnfNodeStorage, targetType string, index int) (string, error) { labels := nnfNodeStorage.GetLabels() // Use the NnfStorage UID since the NnfStorage exists for as long as the storage allocation exists. @@ -468,7 +468,7 @@ func lustreTargetPath(ctx context.Context, c client.Client, nnfNodeStorage *nnfv return fmt.Sprintf("/mnt/nnf/%s-%s-%d", nnfStorageUid, targetType, index), nil } -func zpoolName(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha3.NnfNodeStorage, targetType string, index int) (string, error) { +func zpoolName(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha4.NnfNodeStorage, targetType string, index int) (string, error) { labels := nnfNodeStorage.GetLabels() // Use the NnfStorage UID since the NnfStorage exists for as long as the storage allocation exists. @@ -481,7 +481,7 @@ func zpoolName(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha3 return fmt.Sprintf("pool-%s-%s-%d", nnfStorageUid, targetType, index), nil } -func volumeGroupName(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha3.NnfNodeStorage, index int) (string, error) { +func volumeGroupName(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha4.NnfNodeStorage, index int) (string, error) { labels := nnfNodeStorage.GetLabels() // Use the NnfStorage UID since the NnfStorage exists for as long as the storage allocation exists. 
@@ -490,7 +490,7 @@ func volumeGroupName(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1 if !ok { return "", fmt.Errorf("missing Owner UID label on NnfNodeStorage") } - directiveIndex, ok := labels[nnfv1alpha3.DirectiveIndexLabel] + directiveIndex, ok := labels[nnfv1alpha4.DirectiveIndexLabel] if !ok { return "", fmt.Errorf("missing directive index label on NnfNodeStorage") } @@ -502,7 +502,7 @@ func volumeGroupName(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1 return fmt.Sprintf("%s_%s_%d", nnfStorageUid, directiveIndex, index), nil } -func logicalVolumeName(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha3.NnfNodeStorage, index int) (string, error) { +func logicalVolumeName(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha4.NnfNodeStorage, index int) (string, error) { if nnfNodeStorage.Spec.SharedAllocation { // For a shared VG, the LV name must be unique in the VG return fmt.Sprintf("lv-%d", index), nil diff --git a/internal/controller/integration_test.go b/internal/controller/integration_test.go index 2d1ee4d1..6dc224dc 100644 --- a/internal/controller/integration_test.go +++ b/internal/controller/integration_test.go @@ -42,7 +42,7 @@ import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" dwparse "github.com/DataWorkflowServices/dws/utils/dwdparse" lusv1beta1 "github.com/NearNodeFlash/lustre-fs-operator/api/v1beta1" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" ) var _ = Describe("Integration Test", func() { @@ -62,9 +62,9 @@ var _ = Describe("Integration Test", func() { persistentInstance *dwsv1alpha2.PersistentStorageInstance nodeNames []string setup sync.Once - storageProfile *nnfv1alpha3.NnfStorageProfile - dmProfile *nnfv1alpha3.NnfDataMovementProfile - dmm *nnfv1alpha3.NnfDataMovementManager + storageProfile *nnfv1alpha4.NnfStorageProfile + dmProfile *nnfv1alpha4.NnfDataMovementProfile + dmm 
*nnfv1alpha4.NnfDataMovementManager ) advanceState := func(state dwsv1alpha2.WorkflowState, w *dwsv1alpha2.Workflow, testStackOffset int) { @@ -82,12 +82,12 @@ var _ = Describe("Integration Test", func() { }).WithOffset(testStackOffset).Should(Equal(state), fmt.Sprintf("Waiting on state %s", state)) } - verifyNnfNodeStoragesHaveStorageProfileLabel := func(nnfStorage *nnfv1alpha3.NnfStorage) { + verifyNnfNodeStoragesHaveStorageProfileLabel := func(nnfStorage *nnfv1alpha4.NnfStorage) { for allocationSetIndex := range nnfStorage.Spec.AllocationSets { allocationSet := nnfStorage.Spec.AllocationSets[allocationSetIndex] for i, node := range allocationSet.Nodes { // Per Rabbit namespace. - nnfNodeStorage := &nnfv1alpha3.NnfNodeStorage{ + nnfNodeStorage := &nnfv1alpha4.NnfNodeStorage{ ObjectMeta: metav1.ObjectMeta{ Name: nnfNodeStorageName(nnfStorage, allocationSetIndex, i), Namespace: node.Name, @@ -138,14 +138,14 @@ var _ = Describe("Integration Test", func() { if findDataMovementDirectiveIndex() >= 0 { - dms := &nnfv1alpha3.NnfDataMovementList{} + dms := &nnfv1alpha4.NnfDataMovementList{} Expect(k8sClient.List(context.TODO(), dms)).To(Succeed()) for _, dm := range dms.Items { dm := dm g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(&dm), &dm)).To(Succeed()) - dm.Status.State = nnfv1alpha3.DataMovementConditionTypeFinished - dm.Status.Status = nnfv1alpha3.DataMovementConditionReasonSuccess + dm.Status.State = nnfv1alpha4.DataMovementConditionTypeFinished + dm.Status.Status = nnfv1alpha4.DataMovementConditionReasonSuccess g.Expect(k8sClient.Status().Update(context.TODO(), &dm)).To(Succeed()) } } @@ -164,7 +164,7 @@ var _ = Describe("Integration Test", func() { } By("Verify that the NnfStorage now owns the pinned profile") commonName, commonNamespace := getStorageReferenceNameFromWorkflowActual(w, dwIndex) - nnfStorage := &nnfv1alpha3.NnfStorage{} + nnfStorage := &nnfv1alpha4.NnfStorage{} Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: 
commonName, Namespace: commonNamespace}, nnfStorage)).To(Succeed()) Expect(verifyPinnedProfile(context.TODO(), k8sClient, commonNamespace, commonName)).WithOffset(testStackOffset).To(Succeed()) @@ -257,7 +257,7 @@ var _ = Describe("Integration Test", func() { BlockOwnerDeletion: &blockOwnerDeletion, } - nnfStorage := &nnfv1alpha3.NnfStorage{} + nnfStorage := &nnfv1alpha4.NnfStorage{} if nnfStoragePresent { Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(persistentInstance), nnfStorage)).To(Succeed(), "Fetch NnfStorage matching PersistentStorageInstance") Expect(nnfStorage.ObjectMeta.OwnerReferences).To(ContainElement(persistentStorageOwnerRef), "NnfStorage owned by PersistentStorageInstance") @@ -345,7 +345,7 @@ var _ = Describe("Integration Test", func() { ObjectMeta: metav1.ObjectMeta{ Name: nodeName, Labels: map[string]string{ - nnfv1alpha3.RabbitNodeSelectorLabel: "true", + nnfv1alpha4.RabbitNodeSelectorLabel: "true", }, }, Status: corev1.NodeStatus{ @@ -361,16 +361,16 @@ var _ = Describe("Integration Test", func() { Expect(k8sClient.Create(context.TODO(), node)).To(Succeed()) // Create the NNF Node resource - nnfNode := &nnfv1alpha3.NnfNode{ + nnfNode := &nnfv1alpha4.NnfNode{ ObjectMeta: metav1.ObjectMeta{ Name: "nnf-nlc", Namespace: nodeName, }, - Spec: nnfv1alpha3.NnfNodeSpec{ + Spec: nnfv1alpha4.NnfNodeSpec{ Name: nodeName, - State: nnfv1alpha3.ResourceEnable, + State: nnfv1alpha4.ResourceEnable, }, - Status: nnfv1alpha3.NnfNodeStatus{}, + Status: nnfv1alpha4.NnfNodeStatus{}, } Expect(k8sClient.Create(context.TODO(), nnfNode)).To(Succeed()) @@ -422,26 +422,26 @@ var _ = Describe("Integration Test", func() { workflow = nil Expect(k8sClient.Delete(context.TODO(), storageProfile)).To(Succeed()) - profExpected := &nnfv1alpha3.NnfStorageProfile{} + profExpected := &nnfv1alpha4.NnfStorageProfile{} Eventually(func() error { // Delete can still return the cached object. 
Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storageProfile), profExpected) }).ShouldNot(Succeed()) Expect(k8sClient.Delete(context.TODO(), dmProfile)).To(Succeed()) - dmProfExpected := &nnfv1alpha3.NnfDataMovementProfile{} + dmProfExpected := &nnfv1alpha4.NnfDataMovementProfile{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dmProfile), dmProfExpected) }).ShouldNot(Succeed()) for _, nodeName := range nodeNames { - nnfNode := &nnfv1alpha3.NnfNode{ + nnfNode := &nnfv1alpha4.NnfNode{ ObjectMeta: metav1.ObjectMeta{ Name: "nnf-nlc", Namespace: nodeName, }, } Expect(k8sClient.Delete(context.TODO(), nnfNode)).To(Succeed()) - tempNnfNode := &nnfv1alpha3.NnfNode{} + tempNnfNode := &nnfv1alpha4.NnfNode{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(nnfNode), tempNnfNode) }).ShouldNot(Succeed()) @@ -741,7 +741,7 @@ var _ = Describe("Integration Test", func() { Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: dbdRef.Name, Namespace: dbdRef.Namespace}, dbd)).To(Succeed()) By("Check for an NNF Access describing the computes") - access := &nnfv1alpha3.NnfAccess{ + access := &nnfv1alpha4.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%s", dbd.Name, "computes"), Namespace: workflow.Namespace, @@ -783,9 +783,9 @@ var _ = Describe("Integration Test", func() { Expect(access.Spec.StorageReference).To(MatchFields(IgnoreExtras, Fields{ "Name": Equal(storageName), "Namespace": Equal(workflow.Namespace), // Namespace is the same as the workflow - "Kind": Equal(reflect.TypeOf(nnfv1alpha3.NnfStorage{}).Name()), + "Kind": Equal(reflect.TypeOf(nnfv1alpha4.NnfStorage{}).Name()), })) - storage := &nnfv1alpha3.NnfStorage{ + storage := 
&nnfv1alpha4.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: access.Spec.StorageReference.Name, Namespace: access.Spec.StorageReference.Namespace, @@ -811,7 +811,7 @@ var _ = Describe("Integration Test", func() { // For shared file systems, there should also be a NNF Access for the Rabbit as well as corresponding Client Mounts per Rabbit if fsType == "gfs2" { By("Checking for an NNF Access describing the servers") - access := &nnfv1alpha3.NnfAccess{ + access := &nnfv1alpha4.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%s", dbd.Name, "servers"), Namespace: workflow.Namespace, @@ -841,7 +841,7 @@ var _ = Describe("Integration Test", func() { Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: dbdRef.Name, Namespace: dbdRef.Namespace}, dbd)).To(Succeed()) By("Check that NNF Access describing computes is not present") - access := &nnfv1alpha3.NnfAccess{ + access := &nnfv1alpha4.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%s", dbd.Name, "computes"), Namespace: workflow.Namespace, @@ -857,7 +857,7 @@ var _ = Describe("Integration Test", func() { if fsType == "gfs2" { By("Check that NNF Access describing computes is not present") - access := &nnfv1alpha3.NnfAccess{ + access := &nnfv1alpha4.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%s", dbd.Name, "servers"), Namespace: workflow.Namespace, @@ -901,7 +901,7 @@ var _ = Describe("Integration Test", func() { Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: dbd.Status.Storage.Reference.Name, Namespace: dbd.Status.Storage.Reference.Namespace}, servers)).To(Succeed()) By("NNFStorages for persistentStorageInstance should NOT be deleted") - nnfStorage := &nnfv1alpha3.NnfStorage{} + nnfStorage := &nnfv1alpha4.NnfStorage{} Consistently(func() error { return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(servers), nnfStorage) }).Should(Succeed(), "NnfStorage should continue to exist") @@ -916,7 +916,7 @@ var _ = Describe("Integration 
Test", func() { Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: dbd.Status.Storage.Reference.Name, Namespace: dbd.Status.Storage.Reference.Namespace}, servers)).To(Succeed()) By("NNFStorages associated with jobdw should be deleted") - nnfStorage := &nnfv1alpha3.NnfStorage{} + nnfStorage := &nnfv1alpha4.NnfStorage{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(servers), nnfStorage) }).ShouldNot(Succeed(), "NnfStorage should be deleted") @@ -1009,7 +1009,7 @@ var _ = Describe("Integration Test", func() { BeforeEach(func() { ns := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Name: nnfv1alpha3.DataMovementNamespace, + Name: nnfv1alpha4.DataMovementNamespace, }, } @@ -1028,12 +1028,12 @@ var _ = Describe("Integration Test", func() { }, } - dmm = &nnfv1alpha3.NnfDataMovementManager{ + dmm = &nnfv1alpha4.NnfDataMovementManager{ ObjectMeta: metav1.ObjectMeta{ - Name: nnfv1alpha3.DataMovementManagerName, - Namespace: nnfv1alpha3.DataMovementNamespace, + Name: nnfv1alpha4.DataMovementManagerName, + Namespace: nnfv1alpha4.DataMovementNamespace, }, - Spec: nnfv1alpha3.NnfDataMovementManagerSpec{ + Spec: nnfv1alpha4.NnfDataMovementManagerSpec{ Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ Containers: []corev1.Container{{ @@ -1043,7 +1043,7 @@ var _ = Describe("Integration Test", func() { }, }, }, - Status: nnfv1alpha3.NnfDataMovementManagerStatus{ + Status: nnfv1alpha4.NnfDataMovementManagerStatus{ Ready: true, }, } @@ -1151,7 +1151,7 @@ var _ = Describe("Integration Test", func() { validateNnfAccessHasCorrectTeardownState := func(state dwsv1alpha2.WorkflowState) { Expect(workflow.Status.DirectiveBreakdowns).To(HaveLen(1)) - access := &nnfv1alpha3.NnfAccess{ + access := &nnfv1alpha4.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%d-%s", workflow.Name, 0, "servers"), Namespace: workflow.Namespace, 
@@ -1174,7 +1174,7 @@ var _ = Describe("Integration Test", func() { validateNnfAccessIsNotFound := func() { Expect(workflow.Status.DirectiveBreakdowns).To(HaveLen(1)) - access := &nnfv1alpha3.NnfAccess{ + access := &nnfv1alpha4.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%d-%s", workflow.Name, 0, "servers"), Namespace: workflow.Namespace, @@ -1278,15 +1278,15 @@ var _ = Describe("Integration Test", func() { By("Injecting an error in the data movement resource") - dm := &nnfv1alpha3.NnfDataMovement{ + dm := &nnfv1alpha4.NnfDataMovement{ ObjectMeta: metav1.ObjectMeta{ Name: "failed-data-movement", - Namespace: nnfv1alpha3.DataMovementNamespace, + Namespace: nnfv1alpha4.DataMovementNamespace, }, } dwsv1alpha2.AddWorkflowLabels(dm, workflow) dwsv1alpha2.AddOwnerLabels(dm, workflow) - nnfv1alpha3.AddDataMovementTeardownStateLabel(dm, dwsv1alpha2.StatePostRun) + nnfv1alpha4.AddDataMovementTeardownStateLabel(dm, dwsv1alpha2.StatePostRun) Expect(k8sClient.Create(context.TODO(), dm)).To(Succeed()) @@ -1294,8 +1294,8 @@ var _ = Describe("Integration Test", func() { return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dm), dm) }).Should(Succeed()) - dm.Status.State = nnfv1alpha3.DataMovementConditionTypeFinished - dm.Status.Status = nnfv1alpha3.DataMovementConditionReasonFailed + dm.Status.State = nnfv1alpha4.DataMovementConditionTypeFinished + dm.Status.Status = nnfv1alpha4.DataMovementConditionReasonFailed Expect(k8sClient.Status().Update(context.TODO(), dm)).To(Succeed()) @@ -1324,7 +1324,7 @@ var _ = Describe("Integration Test", func() { Describe("Test with container directives", func() { var ( - containerProfile *nnfv1alpha3.NnfContainerProfile + containerProfile *nnfv1alpha4.NnfContainerProfile ) BeforeEach(func() { @@ -1395,7 +1395,7 @@ var _ = Describe("Integration Test", func() { By("verifying the number of targeted NNF nodes for the container jobs") matchLabels := dwsv1alpha2.MatchingWorkflow(workflow) - 
matchLabels[nnfv1alpha3.DirectiveIndexLabel] = "0" + matchLabels[nnfv1alpha4.DirectiveIndexLabel] = "0" jobList := &batchv1.JobList{} Eventually(func() int { @@ -1419,9 +1419,9 @@ var _ = Describe("Integration Test", func() { var ( intendedDirective string - profileExternalMGS *nnfv1alpha3.NnfStorageProfile - profileCombinedMGTMDT *nnfv1alpha3.NnfStorageProfile - nnfLustreMgt *nnfv1alpha3.NnfLustreMGT + profileExternalMGS *nnfv1alpha4.NnfStorageProfile + profileCombinedMGTMDT *nnfv1alpha4.NnfStorageProfile + nnfLustreMgt *nnfv1alpha4.NnfLustreMGT profileMgsNid string @@ -1452,13 +1452,13 @@ var _ = Describe("Integration Test", func() { Expect(createNnfStorageProfile(profileExternalMGS, true)).ToNot(BeNil()) Expect(createNnfStorageProfile(profileCombinedMGTMDT, true)).ToNot(BeNil()) - nnfLustreMgt = &nnfv1alpha3.NnfLustreMGT{ + nnfLustreMgt = &nnfv1alpha4.NnfLustreMGT{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ Name: "profile-mgs", Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha3.NnfLustreMGTSpec{ + Spec: nnfv1alpha4.NnfLustreMGTSpec{ Addresses: []string{profileMgsNid}, FsNameStart: "dddddddd", }, @@ -1565,7 +1565,7 @@ var _ = Describe("Integration Test", func() { By(fmt.Sprintf("Verify that the MGS NID %s is used by the filesystem", getNidVia)) advanceStateAndCheckReady(dwsv1alpha2.StateSetup, workflow) // The NnfStorage's name matches the Server resource's name. 
- nnfstorage := &nnfv1alpha3.NnfStorage{} + nnfstorage := &nnfv1alpha4.NnfStorage{} Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dbdServer), nnfstorage)).To(Succeed()) for _, comp := range nnfstorage.Spec.AllocationSets { Expect(comp.MgsAddress).To(Equal(desiredNid)) diff --git a/internal/controller/nnf_access_controller.go b/internal/controller/nnf_access_controller.go index 426b793e..9b85780e 100644 --- a/internal/controller/nnf_access_controller.go +++ b/internal/controller/nnf_access_controller.go @@ -47,7 +47,7 @@ import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/updater" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" ) @@ -83,7 +83,7 @@ func (r *NnfAccessReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( metrics.NnfAccessReconcilesTotal.Inc() - access := &nnfv1alpha3.NnfAccess{} + access := &nnfv1alpha4.NnfAccess{} if err := r.Get(ctx, req.NamespacedName, access); err != nil { // ignore not-found errors, since they can't be fixed by an immediate // requeue (we'll need to wait for a new notification), and we can get them @@ -91,7 +91,7 @@ func (r *NnfAccessReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return ctrl.Result{}, client.IgnoreNotFound(err) } - statusUpdater := updater.NewStatusUpdater[*nnfv1alpha3.NnfAccessStatus](access) + statusUpdater := updater.NewStatusUpdater[*nnfv1alpha4.NnfAccessStatus](access) defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() defer func() { access.Status.SetResourceErrorAndLog(err, log) }() @@ -200,7 +200,7 @@ func (r *NnfAccessReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return ctrl.Result{}, nil } -func (r *NnfAccessReconciler) mount(ctx context.Context, access *nnfv1alpha3.NnfAccess, clientList []string, 
storageMapping map[string][]dwsv1alpha2.ClientMountInfo) (*ctrl.Result, error) { +func (r *NnfAccessReconciler) mount(ctx context.Context, access *nnfv1alpha4.NnfAccess, clientList []string, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) (*ctrl.Result, error) { // Lock the NnfStorage by adding an annotation with the name/namespace for this // NnfAccess. This is used for non-clustered file systems that can only be mounted // from a single host. @@ -257,7 +257,7 @@ func (r *NnfAccessReconciler) mount(ctx context.Context, access *nnfv1alpha3.Nnf return nil, nil } -func (r *NnfAccessReconciler) unmount(ctx context.Context, access *nnfv1alpha3.NnfAccess, clientList []string, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) (*ctrl.Result, error) { +func (r *NnfAccessReconciler) unmount(ctx context.Context, access *nnfv1alpha4.NnfAccess, clientList []string, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) (*ctrl.Result, error) { // Update client mounts to trigger unmount operation err := r.manageClientMounts(ctx, access, storageMapping) if err != nil { @@ -291,9 +291,9 @@ func (r *NnfAccessReconciler) unmount(ctx context.Context, access *nnfv1alpha3.N // lockStorage applies an annotation to the NnfStorage resource with the name and namespace of the NnfAccess resource. // This acts as a lock to prevent multiple NnfAccess resources from mounting the same file system. 
This is only necessary // for non-clustered file systems -func (r *NnfAccessReconciler) lockStorage(ctx context.Context, access *nnfv1alpha3.NnfAccess) (bool, error) { +func (r *NnfAccessReconciler) lockStorage(ctx context.Context, access *nnfv1alpha4.NnfAccess) (bool, error) { - if access.Spec.StorageReference.Kind != reflect.TypeOf(nnfv1alpha3.NnfStorage{}).Name() { + if access.Spec.StorageReference.Kind != reflect.TypeOf(nnfv1alpha4.NnfStorage{}).Name() { return false, fmt.Errorf("invalid StorageReference kind %s", access.Spec.StorageReference.Kind) } @@ -302,7 +302,7 @@ func (r *NnfAccessReconciler) lockStorage(ctx context.Context, access *nnfv1alph Namespace: access.Spec.StorageReference.Namespace, } - nnfStorage := &nnfv1alpha3.NnfStorage{} + nnfStorage := &nnfv1alpha4.NnfStorage{} if err := r.Get(ctx, namespacedName, nnfStorage); err != nil { return false, err } @@ -345,10 +345,10 @@ func (r *NnfAccessReconciler) lockStorage(ctx context.Context, access *nnfv1alph } // unlockStorage removes the NnfAccess annotation from an NnfStorage resource if it was added from lockStorage() -func (r *NnfAccessReconciler) unlockStorage(ctx context.Context, access *nnfv1alpha3.NnfAccess) error { - nnfStorage := &nnfv1alpha3.NnfStorage{} +func (r *NnfAccessReconciler) unlockStorage(ctx context.Context, access *nnfv1alpha4.NnfAccess) error { + nnfStorage := &nnfv1alpha4.NnfStorage{} - if access.Spec.StorageReference.Kind != reflect.TypeOf(nnfv1alpha3.NnfStorage{}).Name() { + if access.Spec.StorageReference.Kind != reflect.TypeOf(nnfv1alpha4.NnfStorage{}).Name() { return nil } @@ -395,7 +395,7 @@ func (r *NnfAccessReconciler) unlockStorage(ctx context.Context, access *nnfv1al } // getClientList returns the list of client node names from either the Computes resource of the NnfStorage resource -func (r *NnfAccessReconciler) getClientList(ctx context.Context, access *nnfv1alpha3.NnfAccess) ([]string, error) { +func (r *NnfAccessReconciler) getClientList(ctx context.Context, access 
*nnfv1alpha4.NnfAccess) ([]string, error) { if access.Spec.ClientReference != (corev1.ObjectReference{}) { return r.getClientListFromClientReference(ctx, access) } @@ -404,7 +404,7 @@ func (r *NnfAccessReconciler) getClientList(ctx context.Context, access *nnfv1al } // getClientListFromClientReference returns a list of client nodes names from the Computes resource -func (r *NnfAccessReconciler) getClientListFromClientReference(ctx context.Context, access *nnfv1alpha3.NnfAccess) ([]string, error) { +func (r *NnfAccessReconciler) getClientListFromClientReference(ctx context.Context, access *nnfv1alpha4.NnfAccess) ([]string, error) { computes := &dwsv1alpha2.Computes{} if access.Spec.ClientReference.Kind != reflect.TypeOf(dwsv1alpha2.Computes{}).Name() { @@ -430,9 +430,9 @@ func (r *NnfAccessReconciler) getClientListFromClientReference(ctx context.Conte // getClientListFromStorageReference returns a list of client node names from the NnfStorage resource. This is the list of Rabbit // nodes that host the storage -func (r *NnfAccessReconciler) getClientListFromStorageReference(ctx context.Context, access *nnfv1alpha3.NnfAccess) ([]string, error) { +func (r *NnfAccessReconciler) getClientListFromStorageReference(ctx context.Context, access *nnfv1alpha4.NnfAccess) ([]string, error) { - if access.Spec.StorageReference.Kind != reflect.TypeOf(nnfv1alpha3.NnfStorage{}).Name() { + if access.Spec.StorageReference.Kind != reflect.TypeOf(nnfv1alpha4.NnfStorage{}).Name() { return nil, fmt.Errorf("Invalid StorageReference kind %s", access.Spec.StorageReference.Kind) } @@ -441,7 +441,7 @@ func (r *NnfAccessReconciler) getClientListFromStorageReference(ctx context.Cont Namespace: access.Spec.StorageReference.Namespace, } - nnfStorage := &nnfv1alpha3.NnfStorage{} + nnfStorage := &nnfv1alpha4.NnfStorage{} if err := r.Get(ctx, namespacedName, nnfStorage); err != nil { return nil, err } @@ -463,10 +463,10 @@ func (r *NnfAccessReconciler) getClientListFromStorageReference(ctx context.Cont 
} // mapClientStorage returns a map of the clients with a list of mounts to make. This picks a device for each client -func (r *NnfAccessReconciler) mapClientStorage(ctx context.Context, access *nnfv1alpha3.NnfAccess, clients []string) (map[string][]dwsv1alpha2.ClientMountInfo, error) { - nnfStorage := &nnfv1alpha3.NnfStorage{} +func (r *NnfAccessReconciler) mapClientStorage(ctx context.Context, access *nnfv1alpha4.NnfAccess, clients []string) (map[string][]dwsv1alpha2.ClientMountInfo, error) { + nnfStorage := &nnfv1alpha4.NnfStorage{} - if access.Spec.StorageReference.Kind != reflect.TypeOf(nnfv1alpha3.NnfStorage{}).Name() { + if access.Spec.StorageReference.Kind != reflect.TypeOf(nnfv1alpha4.NnfStorage{}).Name() { return nil, fmt.Errorf("Invalid StorageReference kind %s", access.Spec.StorageReference.Kind) } @@ -504,7 +504,7 @@ func (r *NnfAccessReconciler) mapClientStorage(ctx context.Context, access *nnfv // mapClientNetworkStorage provides the Lustre MGS address information for the clients. All clients get the same // mount information -func (r *NnfAccessReconciler) mapClientNetworkStorage(ctx context.Context, access *nnfv1alpha3.NnfAccess, clients []string, nnfStorage *nnfv1alpha3.NnfStorage, setIndex int) (map[string][]dwsv1alpha2.ClientMountInfo, error) { +func (r *NnfAccessReconciler) mapClientNetworkStorage(ctx context.Context, access *nnfv1alpha4.NnfAccess, clients []string, nnfStorage *nnfv1alpha4.NnfStorage, setIndex int) (map[string][]dwsv1alpha2.ClientMountInfo, error) { storageMapping := make(map[string][]dwsv1alpha2.ClientMountInfo) for _, client := range clients { @@ -535,7 +535,7 @@ func (r *NnfAccessReconciler) mapClientNetworkStorage(ctx context.Context, acces // mapClientLocalStorage picks storage device(s) for each client to access based on locality information // from the (DWS) Storage resources. 
-func (r *NnfAccessReconciler) mapClientLocalStorage(ctx context.Context, access *nnfv1alpha3.NnfAccess, clients []string, nnfStorage *nnfv1alpha3.NnfStorage, setIndex int) (map[string][]dwsv1alpha2.ClientMountInfo, error) { +func (r *NnfAccessReconciler) mapClientLocalStorage(ctx context.Context, access *nnfv1alpha4.NnfAccess, clients []string, nnfStorage *nnfv1alpha4.NnfStorage, setIndex int) (map[string][]dwsv1alpha2.ClientMountInfo, error) { allocationSetSpec := nnfStorage.Spec.AllocationSets[setIndex] // Use information from the NnfStorage resource to determine how many allocations @@ -567,14 +567,14 @@ func (r *NnfAccessReconciler) mapClientLocalStorage(ctx context.Context, access // allocation. for nodeName, storageCount := range storageCountMap { matchLabels := dwsv1alpha2.MatchingOwner(nnfStorage) - matchLabels[nnfv1alpha3.AllocationSetLabel] = allocationSetSpec.Name + matchLabels[nnfv1alpha4.AllocationSetLabel] = allocationSetSpec.Name listOptions := []client.ListOption{ matchLabels, client.InNamespace(nodeName), } - nnfNodeStorageList := &nnfv1alpha3.NnfNodeStorageList{} + nnfNodeStorageList := &nnfv1alpha4.NnfNodeStorageList{} if err := r.List(ctx, nnfNodeStorageList, listOptions...); err != nil { return nil, err } @@ -597,7 +597,7 @@ func (r *NnfAccessReconciler) mapClientLocalStorage(ctx context.Context, access // so clientmountd will not look at the DeviceReference struct. The DeviceReference information is used by // the data movement code to match up mounts between the Rabbit and compute node. 
mountInfo.Device.DeviceReference = &dwsv1alpha2.ClientMountDeviceReference{} - mountInfo.Device.DeviceReference.ObjectReference.Kind = reflect.TypeOf(nnfv1alpha3.NnfNodeStorage{}).Name() + mountInfo.Device.DeviceReference.ObjectReference.Kind = reflect.TypeOf(nnfv1alpha4.NnfNodeStorage{}).Name() mountInfo.Device.DeviceReference.ObjectReference.Name = nnfNodeStorage.Name mountInfo.Device.DeviceReference.ObjectReference.Namespace = nnfNodeStorage.Namespace mountInfo.Device.DeviceReference.Data = i @@ -711,7 +711,7 @@ type mountReference struct { // addNodeStorageEndpoints adds the compute node information to the NnfNodeStorage resource // so it can make the NVMe namespaces accessible on the compute node. This is done on the rabbit // by creating StorageGroup resources through swordfish for the correct endpoint. -func (r *NnfAccessReconciler) addBlockStorageAccess(ctx context.Context, access *nnfv1alpha3.NnfAccess, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) error { +func (r *NnfAccessReconciler) addBlockStorageAccess(ctx context.Context, access *nnfv1alpha4.NnfAccess, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) error { // NnfNodeStorage clientReferences only need to be added for compute nodes. If // this nnfAccess is not for compute nodes, then there's no work to do. if access.Spec.ClientReference == (corev1.ObjectReference{}) { @@ -728,7 +728,7 @@ func (r *NnfAccessReconciler) addBlockStorageAccess(ctx context.Context, access continue } - if mount.Device.DeviceReference.ObjectReference.Kind != reflect.TypeOf(nnfv1alpha3.NnfNodeStorage{}).Name() { + if mount.Device.DeviceReference.ObjectReference.Kind != reflect.TypeOf(nnfv1alpha4.NnfNodeStorage{}).Name() { continue } @@ -744,7 +744,7 @@ func (r *NnfAccessReconciler) addBlockStorageAccess(ctx context.Context, access // Loop through the NnfNodeBlockStorages and add client access information for each of the // computes that need access to an allocation. 
for nodeStorageReference, mountRefList := range nodeStorageMap { - nnfNodeStorage := &nnfv1alpha3.NnfNodeStorage{ + nnfNodeStorage := &nnfv1alpha4.NnfNodeStorage{ ObjectMeta: metav1.ObjectMeta{ Name: nodeStorageReference.Name, Namespace: nodeStorageReference.Namespace, @@ -755,7 +755,7 @@ func (r *NnfAccessReconciler) addBlockStorageAccess(ctx context.Context, access return err } - nnfNodeBlockStorage := &nnfv1alpha3.NnfNodeBlockStorage{ + nnfNodeBlockStorage := &nnfv1alpha4.NnfNodeBlockStorage{ ObjectMeta: metav1.ObjectMeta{ Name: nnfNodeStorage.Spec.BlockReference.Name, Namespace: nnfNodeStorage.Spec.BlockReference.Namespace, @@ -801,7 +801,7 @@ func (r *NnfAccessReconciler) addBlockStorageAccess(ctx context.Context, access return nil } -func (r *NnfAccessReconciler) getBlockStorageAccessStatus(ctx context.Context, access *nnfv1alpha3.NnfAccess, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) (bool, error) { +func (r *NnfAccessReconciler) getBlockStorageAccessStatus(ctx context.Context, access *nnfv1alpha4.NnfAccess, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) (bool, error) { // NnfNodeStorage clientReferences only need to be checked for compute nodes. If // this nnfAccess is not for compute nodes, then there's no work to do. 
if access.Spec.ClientReference == (corev1.ObjectReference{}) { @@ -818,7 +818,7 @@ func (r *NnfAccessReconciler) getBlockStorageAccessStatus(ctx context.Context, a continue } - if mount.Device.DeviceReference.ObjectReference.Kind != reflect.TypeOf(nnfv1alpha3.NnfNodeStorage{}).Name() { + if mount.Device.DeviceReference.ObjectReference.Kind != reflect.TypeOf(nnfv1alpha4.NnfNodeStorage{}).Name() { continue } @@ -832,10 +832,10 @@ func (r *NnfAccessReconciler) getBlockStorageAccessStatus(ctx context.Context, a } } - nnfNodeBlockStorages := []nnfv1alpha3.NnfNodeBlockStorage{} + nnfNodeBlockStorages := []nnfv1alpha4.NnfNodeBlockStorage{} for nodeStorageReference := range nodeStorageMap { - nnfNodeStorage := &nnfv1alpha3.NnfNodeStorage{ + nnfNodeStorage := &nnfv1alpha4.NnfNodeStorage{ ObjectMeta: metav1.ObjectMeta{ Name: nodeStorageReference.Name, Namespace: nodeStorageReference.Namespace, @@ -846,7 +846,7 @@ func (r *NnfAccessReconciler) getBlockStorageAccessStatus(ctx context.Context, a return false, err } - nnfNodeBlockStorage := &nnfv1alpha3.NnfNodeBlockStorage{ + nnfNodeBlockStorage := &nnfv1alpha4.NnfNodeBlockStorage{ ObjectMeta: metav1.ObjectMeta{ Name: nnfNodeStorage.Spec.BlockReference.Name, Namespace: nnfNodeStorage.Spec.BlockReference.Namespace, @@ -890,7 +890,7 @@ func (r *NnfAccessReconciler) getBlockStorageAccessStatus(ctx context.Context, a // removeNodeStorageEndpoints modifies the NnfNodeStorage resources to remove the client endpoints for the // compute nodes that had mounted the storage. This causes NnfNodeStorage to remove the StorageGroups for // those compute nodes and remove access to the NVMe namespaces from the computes. 
-func (r *NnfAccessReconciler) removeBlockStorageAccess(ctx context.Context, access *nnfv1alpha3.NnfAccess, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) error { +func (r *NnfAccessReconciler) removeBlockStorageAccess(ctx context.Context, access *nnfv1alpha4.NnfAccess, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) error { // NnfNodeStorage clientReferences only need to be removed for compute nodes. If // this nnfAccess is not for compute nodes, then there's no work to do. if access.Spec.ClientReference == (corev1.ObjectReference{}) { @@ -907,7 +907,7 @@ func (r *NnfAccessReconciler) removeBlockStorageAccess(ctx context.Context, acce continue } - if mount.Device.DeviceReference.ObjectReference.Kind != reflect.TypeOf(nnfv1alpha3.NnfNodeStorage{}).Name() { + if mount.Device.DeviceReference.ObjectReference.Kind != reflect.TypeOf(nnfv1alpha4.NnfNodeStorage{}).Name() { continue } @@ -924,7 +924,7 @@ func (r *NnfAccessReconciler) removeBlockStorageAccess(ctx context.Context, acce Namespace: nodeBlockStorageReference.Namespace, } - nnfNodeBlockStorage := &nnfv1alpha3.NnfNodeBlockStorage{} + nnfNodeBlockStorage := &nnfv1alpha4.NnfNodeBlockStorage{} err := r.Get(ctx, namespacedName, nnfNodeBlockStorage) if err != nil { if apierrors.IsNotFound(err) { @@ -952,18 +952,18 @@ func (r *NnfAccessReconciler) removeBlockStorageAccess(ctx context.Context, acce } // manageClientMounts creates or updates the ClientMount resources based on the information in the storageMapping map. 
-func (r *NnfAccessReconciler) manageClientMounts(ctx context.Context, access *nnfv1alpha3.NnfAccess, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) error { +func (r *NnfAccessReconciler) manageClientMounts(ctx context.Context, access *nnfv1alpha4.NnfAccess, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) error { log := r.Log.WithValues("NnfAccess", client.ObjectKeyFromObject(access)) if !access.Spec.MakeClientMounts { return nil } - if access.Spec.StorageReference.Kind != reflect.TypeOf(nnfv1alpha3.NnfStorage{}).Name() { + if access.Spec.StorageReference.Kind != reflect.TypeOf(nnfv1alpha4.NnfStorage{}).Name() { return dwsv1alpha2.NewResourceError("invalid StorageReference kind %s", access.Spec.StorageReference.Kind).WithFatal() } - nnfStorage := &nnfv1alpha3.NnfStorage{ + nnfStorage := &nnfv1alpha4.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: access.Spec.StorageReference.Name, Namespace: access.Spec.StorageReference.Namespace, @@ -1032,7 +1032,7 @@ func (r *NnfAccessReconciler) manageClientMounts(ctx context.Context, access *nn } // getClientMountStatus aggregates the status from all the ClientMount resources -func (r *NnfAccessReconciler) getClientMountStatus(ctx context.Context, access *nnfv1alpha3.NnfAccess, clientList []string) (bool, error) { +func (r *NnfAccessReconciler) getClientMountStatus(ctx context.Context, access *nnfv1alpha4.NnfAccess, clientList []string) (bool, error) { log := r.Log.WithValues("NnfAccess", client.ObjectKeyFromObject(access)) if !access.Spec.MakeClientMounts { @@ -1118,7 +1118,7 @@ func (r *NnfAccessReconciler) getClientMountStatus(ctx context.Context, access * return true, nil } -func clientMountName(access *nnfv1alpha3.NnfAccess) string { +func clientMountName(access *nnfv1alpha4.NnfAccess) string { return access.Namespace + "-" + access.Name } @@ -1192,7 +1192,7 @@ func (r *NnfAccessReconciler) ComputesEnqueueRequests(ctx context.Context, o cli }), } - nnfAccessList := &nnfv1alpha3.NnfAccessList{} + 
nnfAccessList := &nnfv1alpha4.NnfAccessList{} if err := r.List(context.TODO(), nnfAccessList, listOptions...); err != nil { log.Info("Could not list NnfAccesses", "error", err) return requests @@ -1230,7 +1230,7 @@ func (r *NnfAccessReconciler) SetupWithManager(mgr ctrl.Manager) error { maxReconciles := runtime.GOMAXPROCS(0) return ctrl.NewControllerManagedBy(mgr). WithOptions(controller.Options{MaxConcurrentReconciles: maxReconciles}). - For(&nnfv1alpha3.NnfAccess{}). + For(&nnfv1alpha4.NnfAccess{}). Watches(&dwsv1alpha2.Computes{}, handler.EnqueueRequestsFromMapFunc(r.ComputesEnqueueRequests)). Watches(&dwsv1alpha2.ClientMount{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). Complete(r) diff --git a/internal/controller/nnf_access_controller_test.go b/internal/controller/nnf_access_controller_test.go index b7860092..c0f6b8ad 100644 --- a/internal/controller/nnf_access_controller_test.go +++ b/internal/controller/nnf_access_controller_test.go @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" ) var _ = Describe("Access Controller Test", func() { @@ -43,11 +43,11 @@ var _ = Describe("Access Controller Test", func() { "rabbit-nnf-access-test-node-1", "rabbit-nnf-access-test-node-2"} - nnfNodes := [2]*nnfv1alpha3.NnfNode{} + nnfNodes := [2]*nnfv1alpha4.NnfNode{} nodes := [2]*corev1.Node{} var systemConfiguration *dwsv1alpha2.SystemConfiguration - var storageProfile *nnfv1alpha3.NnfStorageProfile + var storageProfile *nnfv1alpha4.NnfStorageProfile var setup sync.Once BeforeEach(func() { @@ -84,7 +84,7 @@ var _ = Describe("Access Controller Test", func() { ObjectMeta: metav1.ObjectMeta{ Name: nodeName, Labels: map[string]string{ - nnfv1alpha3.RabbitNodeSelectorLabel: "true", + nnfv1alpha4.RabbitNodeSelectorLabel: "true", }, }, Status: 
corev1.NodeStatus{ @@ -99,14 +99,14 @@ var _ = Describe("Access Controller Test", func() { Expect(k8sClient.Create(context.TODO(), nodes[i])).To(Succeed()) - nnfNodes[i] = &nnfv1alpha3.NnfNode{ + nnfNodes[i] = &nnfv1alpha4.NnfNode{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ Name: "nnf-nlc", Namespace: nodeName, }, - Spec: nnfv1alpha3.NnfNodeSpec{ - State: nnfv1alpha3.ResourceEnable, + Spec: nnfv1alpha4.NnfNodeSpec{ + State: nnfv1alpha4.ResourceEnable, }, } Expect(k8sClient.Create(context.TODO(), nnfNodes[i])).To(Succeed()) @@ -137,14 +137,14 @@ var _ = Describe("Access Controller Test", func() { AfterEach(func() { Expect(k8sClient.Delete(context.TODO(), storageProfile)).To(Succeed()) - profExpected := &nnfv1alpha3.NnfStorageProfile{} + profExpected := &nnfv1alpha4.NnfStorageProfile{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storageProfile), profExpected) }).ShouldNot(Succeed()) for i := range nodeNames { Expect(k8sClient.Delete(context.TODO(), nnfNodes[i])).To(Succeed()) - tempNnfNode := &nnfv1alpha3.NnfNode{} + tempNnfNode := &nnfv1alpha4.NnfNode{} Eventually(func() error { // Delete can still return the cached object. 
Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(nnfNodes[i]), tempNnfNode) }).ShouldNot(Succeed()) @@ -166,29 +166,29 @@ var _ = Describe("Access Controller Test", func() { Describe("Create Client Mounts", func() { It("Creates Lustre Client Mount", func() { - allocationNodes := make([]nnfv1alpha3.NnfStorageAllocationNodes, len(nodeNames)) + allocationNodes := make([]nnfv1alpha4.NnfStorageAllocationNodes, len(nodeNames)) for idx, nodeName := range nodeNames { - allocationNodes[idx] = nnfv1alpha3.NnfStorageAllocationNodes{ + allocationNodes[idx] = nnfv1alpha4.NnfStorageAllocationNodes{ Count: 1, Name: nodeName, } } - storage := &nnfv1alpha3.NnfStorage{ + storage := &nnfv1alpha4.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "nnf-access-test-storage-lustre", Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha3.NnfStorageSpec{ + Spec: nnfv1alpha4.NnfStorageSpec{ FileSystemType: "lustre", - AllocationSets: []nnfv1alpha3.NnfStorageAllocationSetSpec{ + AllocationSets: []nnfv1alpha4.NnfStorageAllocationSetSpec{ { Name: "mgtmdt", Capacity: 50000000000, - NnfStorageLustreSpec: nnfv1alpha3.NnfStorageLustreSpec{ + NnfStorageLustreSpec: nnfv1alpha4.NnfStorageLustreSpec{ TargetType: "mgtmdt", }, - Nodes: []nnfv1alpha3.NnfStorageAllocationNodes{ + Nodes: []nnfv1alpha4.NnfStorageAllocationNodes{ { Count: 1, Name: nodeNames[0], @@ -198,7 +198,7 @@ var _ = Describe("Access Controller Test", func() { { Name: "ost", Capacity: 50000000000, - NnfStorageLustreSpec: nnfv1alpha3.NnfStorageLustreSpec{ + NnfStorageLustreSpec: nnfv1alpha4.NnfStorageLustreSpec{ TargetType: "ost", }, Nodes: allocationNodes, @@ -212,18 +212,18 @@ var _ = Describe("Access Controller Test", func() { It("Creates XFS Client Mount", func() { - storage := &nnfv1alpha3.NnfStorage{ + storage := &nnfv1alpha4.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "nnf-access-test-storage-xfs", Namespace: corev1.NamespaceDefault, }, - Spec: 
nnfv1alpha3.NnfStorageSpec{ + Spec: nnfv1alpha4.NnfStorageSpec{ FileSystemType: "xfs", - AllocationSets: []nnfv1alpha3.NnfStorageAllocationSetSpec{ + AllocationSets: []nnfv1alpha4.NnfStorageAllocationSetSpec{ { Name: "xfs", Capacity: 50000000000, - Nodes: []nnfv1alpha3.NnfStorageAllocationNodes{ + Nodes: []nnfv1alpha4.NnfStorageAllocationNodes{ { Count: 1, Name: nodeNames[0], @@ -243,18 +243,18 @@ var _ = Describe("Access Controller Test", func() { It("Creates GFS2 Client Mount", func() { - storage := &nnfv1alpha3.NnfStorage{ + storage := &nnfv1alpha4.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "nnf-access-test-storage-gfs2", Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha3.NnfStorageSpec{ + Spec: nnfv1alpha4.NnfStorageSpec{ FileSystemType: "gfs2", - AllocationSets: []nnfv1alpha3.NnfStorageAllocationSetSpec{ + AllocationSets: []nnfv1alpha4.NnfStorageAllocationSetSpec{ { Name: "gfs2", Capacity: 50000000000, - Nodes: []nnfv1alpha3.NnfStorageAllocationNodes{ + Nodes: []nnfv1alpha4.NnfStorageAllocationNodes{ { Count: 1, Name: nodeNames[0], @@ -274,7 +274,7 @@ var _ = Describe("Access Controller Test", func() { }) }) -func verifyClientMount(storage *nnfv1alpha3.NnfStorage, storageProfile *nnfv1alpha3.NnfStorageProfile, nodeNames []string) { +func verifyClientMount(storage *nnfv1alpha4.NnfStorage, storageProfile *nnfv1alpha4.NnfStorageProfile, nodeNames []string) { Expect(k8sClient.Create(context.TODO(), storage)).To(Succeed(), "Create NNF Storage") Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storage), storage)).To(Succeed()) @@ -283,12 +283,12 @@ func verifyClientMount(storage *nnfv1alpha3.NnfStorage, storageProfile *nnfv1alp }).Should(Succeed()) mountPath := "/mnt/nnf/12345-0/" - access := &nnfv1alpha3.NnfAccess{ + access := &nnfv1alpha4.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: "nnf-access-test-access-" + storage.Spec.FileSystemType, Namespace: corev1.NamespaceDefault, }, - Spec: 
nnfv1alpha3.NnfAccessSpec{ + Spec: nnfv1alpha4.NnfAccessSpec{ DesiredState: "mounted", TeardownState: dwsv1alpha2.StatePreRun, @@ -299,7 +299,7 @@ func verifyClientMount(storage *nnfv1alpha3.NnfStorage, storageProfile *nnfv1alp MountPathPrefix: mountPath, StorageReference: corev1.ObjectReference{ - Kind: reflect.TypeOf(nnfv1alpha3.NnfStorage{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha4.NnfStorage{}).Name(), Name: storage.Name, Namespace: storage.Namespace, }, diff --git a/internal/controller/nnf_clientmount_controller.go b/internal/controller/nnf_clientmount_controller.go index 0a65e052..36292d01 100644 --- a/internal/controller/nnf_clientmount_controller.go +++ b/internal/controller/nnf_clientmount_controller.go @@ -44,7 +44,7 @@ import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/updater" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" ) @@ -348,7 +348,7 @@ func (r *NnfClientMountReconciler) getServerForClientMount(ctx context.Context, ownerKind, ownerExists := clientMount.Labels[dwsv1alpha2.OwnerKindLabel] ownerName, ownerNameExists := clientMount.Labels[dwsv1alpha2.OwnerNameLabel] ownerNS, ownerNSExists := clientMount.Labels[dwsv1alpha2.OwnerNamespaceLabel] - _, idxExists := clientMount.Labels[nnfv1alpha3.DirectiveIndexLabel] + _, idxExists := clientMount.Labels[nnfv1alpha4.DirectiveIndexLabel] // We should expect the owner of the ClientMount to be NnfStorage and have the expected labels if !ownerExists || !ownerNameExists || !ownerNSExists || !idxExists || ownerKind != storageKind { @@ -356,7 +356,7 @@ func (r *NnfClientMountReconciler) getServerForClientMount(ctx context.Context, } // Retrieve the NnfStorage resource - storage := &nnfv1alpha3.NnfStorage{ + storage := &nnfv1alpha4.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: ownerName, Namespace: ownerNS, 
@@ -370,7 +370,7 @@ func (r *NnfClientMountReconciler) getServerForClientMount(ctx context.Context, ownerKind, ownerExists = storage.Labels[dwsv1alpha2.OwnerKindLabel] ownerName, ownerNameExists = storage.Labels[dwsv1alpha2.OwnerNameLabel] ownerNS, ownerNSExists = storage.Labels[dwsv1alpha2.OwnerNamespaceLabel] - idx, idxExists := storage.Labels[nnfv1alpha3.DirectiveIndexLabel] + idx, idxExists := storage.Labels[nnfv1alpha4.DirectiveIndexLabel] // We should expect the owner of the NnfStorage to be Workflow or PersistentStorageInstance and // have the expected labels @@ -386,7 +386,7 @@ func (r *NnfClientMountReconciler) getServerForClientMount(ctx context.Context, client.MatchingLabels(map[string]string{ dwsv1alpha2.WorkflowNameLabel: ownerName, dwsv1alpha2.WorkflowNamespaceLabel: ownerNS, - nnfv1alpha3.DirectiveIndexLabel: idx, + nnfv1alpha4.DirectiveIndexLabel: idx, }), } } else { @@ -449,8 +449,8 @@ func createLustreMapping(server *dwsv1alpha2.Servers) map[string][]string { // fakeNnfNodeStorage creates an NnfNodeStorage resource filled in with only the fields // that are necessary to mount the file system. This is done to reduce the API server load // because the compute nodes don't need to Get() the actual NnfNodeStorage. 
-func (r *NnfClientMountReconciler) fakeNnfNodeStorage(ctx context.Context, clientMount *dwsv1alpha2.ClientMount, index int) (*nnfv1alpha3.NnfNodeStorage, error) { - nnfNodeStorage := &nnfv1alpha3.NnfNodeStorage{ +func (r *NnfClientMountReconciler) fakeNnfNodeStorage(ctx context.Context, clientMount *dwsv1alpha2.ClientMount, index int) (*nnfv1alpha4.NnfNodeStorage, error) { + nnfNodeStorage := &nnfv1alpha4.NnfNodeStorage{ ObjectMeta: metav1.ObjectMeta{ Name: clientMount.Spec.Mounts[index].Device.DeviceReference.ObjectReference.Name, Namespace: clientMount.Spec.Mounts[index].Device.DeviceReference.ObjectReference.Namespace, @@ -462,7 +462,7 @@ func (r *NnfClientMountReconciler) fakeNnfNodeStorage(ctx context.Context, clien // labels that are important for doing the mount are there and correct dwsv1alpha2.InheritParentLabels(nnfNodeStorage, clientMount) labels := nnfNodeStorage.GetLabels() - labels[nnfv1alpha3.DirectiveIndexLabel] = getTargetDirectiveIndexLabel(clientMount) + labels[nnfv1alpha4.DirectiveIndexLabel] = getTargetDirectiveIndexLabel(clientMount) labels[dwsv1alpha2.OwnerUidLabel] = getTargetOwnerUIDLabel(clientMount) nnfNodeStorage.SetLabels(labels) diff --git a/internal/controller/nnf_lustre_mgt_controller.go b/internal/controller/nnf_lustre_mgt_controller.go index df71976d..fbf00c71 100644 --- a/internal/controller/nnf_lustre_mgt_controller.go +++ b/internal/controller/nnf_lustre_mgt_controller.go @@ -39,7 +39,7 @@ import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/updater" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" "github.com/NearNodeFlash/nnf-sos/pkg/command" ) @@ -85,7 +85,7 @@ func (r *NnfLustreMGTReconciler) Reconcile(ctx context.Context, req ctrl.Request metrics.NnfLustreMGTReconcilesTotal.Inc() - nnfLustreMgt := &nnfv1alpha3.NnfLustreMGT{} + 
nnfLustreMgt := &nnfv1alpha4.NnfLustreMGT{} if err := r.Get(ctx, req.NamespacedName, nnfLustreMgt); err != nil { // ignore not-found errors, since they can't be fixed by an immediate // requeue (we'll need to wait for a new notification), and we can get them @@ -93,7 +93,7 @@ func (r *NnfLustreMGTReconciler) Reconcile(ctx context.Context, req ctrl.Request return ctrl.Result{}, client.IgnoreNotFound(err) } - statusUpdater := updater.NewStatusUpdater[*nnfv1alpha3.NnfLustreMGTStatus](nnfLustreMgt) + statusUpdater := updater.NewStatusUpdater[*nnfv1alpha4.NnfLustreMGTStatus](nnfLustreMgt) defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() defer func() { nnfLustreMgt.Status.SetResourceErrorAndLog(err, log) }() @@ -224,7 +224,7 @@ func incrementFsName(fsname string) string { return string(incrementRuneList(runeList, 'a', 'z')) } -func isFsNameBlackListed(nnfLustreMgt *nnfv1alpha3.NnfLustreMGT, fsname string) bool { +func isFsNameBlackListed(nnfLustreMgt *nnfv1alpha4.NnfLustreMGT, fsname string) bool { // Check the blacklist for _, blackListedFsName := range nnfLustreMgt.Spec.FsNameBlackList { if fsname == blackListedFsName { @@ -237,7 +237,7 @@ func isFsNameBlackListed(nnfLustreMgt *nnfv1alpha3.NnfLustreMGT, fsname string) // SetFsNameNext sets the Status.FsNameNext field to the next available fsname. It also // updates the configmap the FsNameStartReference field if needed. 
-func (r *NnfLustreMGTReconciler) SetFsNameNext(ctx context.Context, nnfLustreMgt *nnfv1alpha3.NnfLustreMGT, fsname string) (*ctrl.Result, error) { +func (r *NnfLustreMGTReconciler) SetFsNameNext(ctx context.Context, nnfLustreMgt *nnfv1alpha4.NnfLustreMGT, fsname string) (*ctrl.Result, error) { // Find the next available fsname that isn't blacklisted for { fsname = incrementFsName(fsname) @@ -286,7 +286,7 @@ func (r *NnfLustreMGTReconciler) SetFsNameNext(ctx context.Context, nnfLustreMgt // HandleNewClaims looks for any new claims in Spec.ClaimList and assigns them // an fsname -func (r *NnfLustreMGTReconciler) HandleNewClaims(ctx context.Context, nnfLustreMgt *nnfv1alpha3.NnfLustreMGT) (*ctrl.Result, error) { +func (r *NnfLustreMGTReconciler) HandleNewClaims(ctx context.Context, nnfLustreMgt *nnfv1alpha4.NnfLustreMGT) (*ctrl.Result, error) { claimMap := map[corev1.ObjectReference]string{} for _, claim := range nnfLustreMgt.Status.ClaimList { claimMap[claim.Reference] = claim.FsName @@ -304,7 +304,7 @@ func (r *NnfLustreMGTReconciler) HandleNewClaims(ctx context.Context, nnfLustreM return result, nil } - newClaim := nnfv1alpha3.NnfLustreMGTStatusClaim{ + newClaim := nnfv1alpha4.NnfLustreMGTStatusClaim{ Reference: reference, FsName: fsnameNext, } @@ -320,7 +320,7 @@ func (r *NnfLustreMGTReconciler) HandleNewClaims(ctx context.Context, nnfLustreM // RemoveOldClaims removes any old entries from the Status.ClaimList and erases the fsname from // the MGT if necessary. 
-func (r *NnfLustreMGTReconciler) RemoveOldClaims(ctx context.Context, nnfLustreMgt *nnfv1alpha3.NnfLustreMGT) error { +func (r *NnfLustreMGTReconciler) RemoveOldClaims(ctx context.Context, nnfLustreMgt *nnfv1alpha4.NnfLustreMGT) error { claimMap := map[corev1.ObjectReference]bool{} for _, reference := range nnfLustreMgt.Spec.ClaimList { claimMap[reference] = true @@ -341,7 +341,7 @@ func (r *NnfLustreMGTReconciler) RemoveOldClaims(ctx context.Context, nnfLustreM return nil } -func (r *NnfLustreMGTReconciler) EraseOldFsName(nnfLustreMgt *nnfv1alpha3.NnfLustreMGT, fsname string) error { +func (r *NnfLustreMGTReconciler) EraseOldFsName(nnfLustreMgt *nnfv1alpha4.NnfLustreMGT, fsname string) error { log := r.Log.WithValues("NnfLustreMGT", client.ObjectKeyFromObject(nnfLustreMgt)) if os.Getenv("ENVIRONMENT") == "kind" { @@ -386,7 +386,7 @@ func filterByNnfSystemNamespace() predicate.Predicate { func (r *NnfLustreMGTReconciler) SetupWithManager(mgr ctrl.Manager) error { builder := ctrl.NewControllerManagedBy(mgr). WithOptions(controller.Options{MaxConcurrentReconciles: 1}). 
- For(&nnfv1alpha3.NnfLustreMGT{}) + For(&nnfv1alpha4.NnfLustreMGT{}) switch r.ControllerType { case ControllerRabbit: diff --git a/internal/controller/nnf_lustre_mgt_controller_test.go b/internal/controller/nnf_lustre_mgt_controller_test.go index d2bcc55a..cc022980 100644 --- a/internal/controller/nnf_lustre_mgt_controller_test.go +++ b/internal/controller/nnf_lustre_mgt_controller_test.go @@ -30,18 +30,18 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" ) var _ = Describe("NnfLustreMGT Controller Test", func() { It("Verifies a single fsname consumer", func() { - nnfLustreMgt := &nnfv1alpha3.NnfLustreMGT{ + nnfLustreMgt := &nnfv1alpha4.NnfLustreMGT{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ Name: "test-mgt", Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha3.NnfLustreMGTSpec{ + Spec: nnfv1alpha4.NnfLustreMGTSpec{ Addresses: []string{"1.1.1.1@tcp"}, FsNameStart: "bbbbbbbb", }, @@ -87,13 +87,13 @@ var _ = Describe("NnfLustreMGT Controller Test", func() { }) It("Verifies two fsname consumers with fsname wrap", func() { - nnfLustreMgt := &nnfv1alpha3.NnfLustreMGT{ + nnfLustreMgt := &nnfv1alpha4.NnfLustreMGT{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ Name: "test-mgt", Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha3.NnfLustreMGTSpec{ + Spec: nnfv1alpha4.NnfLustreMGTSpec{ Addresses: []string{"1.1.1.1@tcp"}, FsNameStart: "zzzzzzzz", }, @@ -164,13 +164,13 @@ var _ = Describe("NnfLustreMGT Controller Test", func() { } Expect(k8sClient.Create(context.TODO(), configMap)).To(Succeed()) - nnfLustreMgt := &nnfv1alpha3.NnfLustreMGT{ + nnfLustreMgt := &nnfv1alpha4.NnfLustreMGT{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ Name: "test-mgt", Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha3.NnfLustreMGTSpec{ + Spec: 
nnfv1alpha4.NnfLustreMGTSpec{ Addresses: []string{"1.1.1.1@tcp"}, FsNameStart: "bbbbbbbb", FsNameStartReference: corev1.ObjectReference{ diff --git a/internal/controller/nnf_node_block_storage_controller.go b/internal/controller/nnf_node_block_storage_controller.go index 8fa3eb3c..0978be69 100644 --- a/internal/controller/nnf_node_block_storage_controller.go +++ b/internal/controller/nnf_node_block_storage_controller.go @@ -55,7 +55,7 @@ import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/updater" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" "github.com/NearNodeFlash/nnf-sos/pkg/blockdevice/nvme" ) @@ -106,7 +106,7 @@ func (r *NnfNodeBlockStorageReconciler) EventHandler(e nnfevent.Event) error { log.Info("triggering watch") - r.Events <- event.GenericEvent{Object: &nnfv1alpha3.NnfNodeBlockStorage{ + r.Events <- event.GenericEvent{Object: &nnfv1alpha4.NnfNodeBlockStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "nnf-ec-event", Namespace: "nnf-ec-event", @@ -159,7 +159,7 @@ func (r *NnfNodeBlockStorageReconciler) Reconcile(ctx context.Context, req ctrl. metrics.NnfNodeBlockStorageReconcilesTotal.Inc() - nodeBlockStorage := &nnfv1alpha3.NnfNodeBlockStorage{} + nodeBlockStorage := &nnfv1alpha4.NnfNodeBlockStorage{} if err := r.Get(ctx, req.NamespacedName, nodeBlockStorage); err != nil { // ignore not-found errors, since they can't be fixed by an immediate // requeue (we'll need to wait for a new notification), and we can get them @@ -178,7 +178,7 @@ func (r *NnfNodeBlockStorageReconciler) Reconcile(ctx context.Context, req ctrl. 
return ctrl.Result{RequeueAfter: 1 * time.Second}, nil } - statusUpdater := updater.NewStatusUpdater[*nnfv1alpha3.NnfNodeBlockStorageStatus](nodeBlockStorage) + statusUpdater := updater.NewStatusUpdater[*nnfv1alpha4.NnfNodeBlockStorageStatus](nodeBlockStorage) defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() defer func() { nodeBlockStorage.Status.SetResourceErrorAndLog(err, log) }() @@ -226,9 +226,9 @@ func (r *NnfNodeBlockStorageReconciler) Reconcile(ctx context.Context, req ctrl. // Initialize the status section with empty allocation statuses. if len(nodeBlockStorage.Status.Allocations) == 0 { - nodeBlockStorage.Status.Allocations = make([]nnfv1alpha3.NnfNodeBlockStorageAllocationStatus, len(nodeBlockStorage.Spec.Allocations)) + nodeBlockStorage.Status.Allocations = make([]nnfv1alpha4.NnfNodeBlockStorageAllocationStatus, len(nodeBlockStorage.Spec.Allocations)) for i := range nodeBlockStorage.Status.Allocations { - nodeBlockStorage.Status.Allocations[i].Accesses = make(map[string]nnfv1alpha3.NnfNodeBlockStorageAccessStatus) + nodeBlockStorage.Status.Allocations[i].Accesses = make(map[string]nnfv1alpha4.NnfNodeBlockStorageAccessStatus) } return ctrl.Result{}, nil @@ -287,7 +287,7 @@ func (r *NnfNodeBlockStorageReconciler) Reconcile(ctx context.Context, req ctrl. 
return ctrl.Result{}, nil } -func (r *NnfNodeBlockStorageReconciler) allocateStorage(nodeBlockStorage *nnfv1alpha3.NnfNodeBlockStorage, index int) (*ctrl.Result, error) { +func (r *NnfNodeBlockStorageReconciler) allocateStorage(nodeBlockStorage *nnfv1alpha4.NnfNodeBlockStorage, index int) (*ctrl.Result, error) { log := r.Log.WithValues("NnfNodeBlockStorage", types.NamespacedName{Name: nodeBlockStorage.Name, Namespace: nodeBlockStorage.Namespace}) ss := nnf.NewDefaultStorageService(r.Options.DeleteUnknownVolumes()) @@ -307,7 +307,7 @@ func (r *NnfNodeBlockStorageReconciler) allocateStorage(nodeBlockStorage *nnfv1a } if len(allocationStatus.Devices) == 0 { - allocationStatus.Devices = make([]nnfv1alpha3.NnfNodeBlockStorageDeviceStatus, len(vc.Members)) + allocationStatus.Devices = make([]nnfv1alpha4.NnfNodeBlockStorageDeviceStatus, len(vc.Members)) } if len(allocationStatus.Devices) != len(vc.Members) { @@ -345,7 +345,7 @@ func (r *NnfNodeBlockStorageReconciler) allocateStorage(nodeBlockStorage *nnfv1a return nil, nil } -func (r *NnfNodeBlockStorageReconciler) createBlockDevice(ctx context.Context, nodeBlockStorage *nnfv1alpha3.NnfNodeBlockStorage, index int) (*ctrl.Result, error) { +func (r *NnfNodeBlockStorageReconciler) createBlockDevice(ctx context.Context, nodeBlockStorage *nnfv1alpha4.NnfNodeBlockStorage, index int) (*ctrl.Result, error) { log := r.Log.WithValues("NnfNodeBlockStorage", types.NamespacedName{Name: nodeBlockStorage.Name, Namespace: nodeBlockStorage.Namespace}) ss := nnf.NewDefaultStorageService(r.Options.DeleteUnknownVolumes()) @@ -423,7 +423,7 @@ func (r *NnfNodeBlockStorageReconciler) createBlockDevice(ctx context.Context, n } else { // The kind environment doesn't support endpoints beyond the Rabbit if os.Getenv("ENVIRONMENT") == "kind" && endpointID != os.Getenv("RABBIT_NODE") { - allocationStatus.Accesses[nodeName] = nnfv1alpha3.NnfNodeBlockStorageAccessStatus{StorageGroupId: "fake-storage-group"} + allocationStatus.Accesses[nodeName] = 
nnfv1alpha4.NnfNodeBlockStorageAccessStatus{StorageGroupId: "fake-storage-group"} continue } @@ -433,7 +433,7 @@ func (r *NnfNodeBlockStorageReconciler) createBlockDevice(ctx context.Context, n } // Skip the endpoints that are not ready - if nnfv1alpha3.StaticResourceStatus(endPoint.Status) != nnfv1alpha3.ResourceReady { + if nnfv1alpha4.StaticResourceStatus(endPoint.Status) != nnfv1alpha4.ResourceReady { continue } @@ -443,13 +443,13 @@ func (r *NnfNodeBlockStorageReconciler) createBlockDevice(ctx context.Context, n } if allocationStatus.Accesses == nil { - allocationStatus.Accesses = make(map[string]nnfv1alpha3.NnfNodeBlockStorageAccessStatus) + allocationStatus.Accesses = make(map[string]nnfv1alpha4.NnfNodeBlockStorageAccessStatus) } // If the access status doesn't exist then we just created the resource. Save the ID in the NnfNodeBlockStorage if _, ok := allocationStatus.Accesses[nodeName]; !ok { log.Info("Created storage group", "Id", storageGroupId) - allocationStatus.Accesses[nodeName] = nnfv1alpha3.NnfNodeBlockStorageAccessStatus{StorageGroupId: sg.Id} + allocationStatus.Accesses[nodeName] = nnfv1alpha4.NnfNodeBlockStorageAccessStatus{StorageGroupId: sg.Id} } // The device paths are discovered below. 
This is only relevant for the Rabbit node access @@ -506,7 +506,7 @@ func (r *NnfNodeBlockStorageReconciler) createBlockDevice(ctx context.Context, n } -func (r *NnfNodeBlockStorageReconciler) deleteStorage(nodeBlockStorage *nnfv1alpha3.NnfNodeBlockStorage, index int) (*ctrl.Result, error) { +func (r *NnfNodeBlockStorageReconciler) deleteStorage(nodeBlockStorage *nnfv1alpha4.NnfNodeBlockStorage, index int) (*ctrl.Result, error) { log := r.Log.WithValues("NnfNodeBlockStorage", types.NamespacedName{Name: nodeBlockStorage.Name, Namespace: nodeBlockStorage.Namespace}) ss := nnf.NewDefaultStorageService(r.Options.DeleteUnknownVolumes()) @@ -531,7 +531,7 @@ func (r *NnfNodeBlockStorageReconciler) deleteStorage(nodeBlockStorage *nnfv1alp return nil, nil } -func getStoragePoolID(nodeBlockStorage *nnfv1alpha3.NnfNodeBlockStorage, index int) string { +func getStoragePoolID(nodeBlockStorage *nnfv1alpha4.NnfNodeBlockStorage, index int) string { return fmt.Sprintf("%s-%d", nodeBlockStorage.Name, index) } @@ -643,7 +643,7 @@ func (r *NnfNodeBlockStorageReconciler) NnfEcEventEnqueueHandler(ctx context.Con client.InNamespace(os.Getenv("NNF_NODE_NAME")), } - nnfNodeBlockStorageList := &nnfv1alpha3.NnfNodeBlockStorageList{} + nnfNodeBlockStorageList := &nnfv1alpha4.NnfNodeBlockStorageList{} if err := r.List(context.TODO(), nnfNodeBlockStorageList, listOptions...); err != nil { log.Error(err, "Could not list block storages") @@ -652,7 +652,7 @@ func (r *NnfNodeBlockStorageReconciler) NnfEcEventEnqueueHandler(ctx context.Con time.Sleep(time.Second * 10) log.Info("triggering watch after List() error") - r.Events <- event.GenericEvent{Object: &nnfv1alpha3.NnfNodeBlockStorage{ + r.Events <- event.GenericEvent{Object: &nnfv1alpha4.NnfNodeBlockStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "nnf-ec-event", Namespace: "nnf-ec-event", @@ -681,7 +681,7 @@ func (r *NnfNodeBlockStorageReconciler) SetupWithManager(mgr ctrl.Manager) error // nnf-ec is not thread safe, so we are limited to a single 
reconcile thread. return ctrl.NewControllerManagedBy(mgr). WithOptions(controller.Options{MaxConcurrentReconciles: 1}). - For(&nnfv1alpha3.NnfNodeBlockStorage{}). + For(&nnfv1alpha4.NnfNodeBlockStorage{}). WatchesRawSource(&source.Channel{Source: r.Events}, handler.EnqueueRequestsFromMapFunc(r.NnfEcEventEnqueueHandler)). Complete(r) } diff --git a/internal/controller/nnf_node_controller.go b/internal/controller/nnf_node_controller.go index a673e13c..fc5165b4 100644 --- a/internal/controller/nnf_node_controller.go +++ b/internal/controller/nnf_node_controller.go @@ -53,7 +53,7 @@ import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/updater" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" ) @@ -118,7 +118,7 @@ func (r *NnfNodeReconciler) Start(ctx context.Context) error { log.Info("Created Namespace") } - node := &nnfv1alpha3.NnfNode{} + node := &nnfv1alpha4.NnfNode{} if err := r.Get(ctx, r.NamespacedName, node); err != nil { if !errors.IsNotFound(err) { @@ -139,7 +139,7 @@ func (r *NnfNodeReconciler) Start(ctx context.Context) error { } else { err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - node := &nnfv1alpha3.NnfNode{} + node := &nnfv1alpha4.NnfNode{} if err := r.Get(ctx, r.NamespacedName, node); err != nil { return err } @@ -155,8 +155,8 @@ func (r *NnfNodeReconciler) Start(ctx context.Context) error { } // Mark the node's status as starting - if node.Status.Status != nnfv1alpha3.ResourceStarting { - node.Status.Status = nnfv1alpha3.ResourceStarting + if node.Status.Status != nnfv1alpha4.ResourceStarting { + node.Status.Status = nnfv1alpha4.ResourceStarting if err := r.Status().Update(ctx, node); err != nil { return err @@ -205,7 +205,7 @@ func (r *NnfNodeReconciler) EventHandler(e nnfevent.Event) error { log.Info("triggering watch") - r.Events <- 
event.GenericEvent{Object: &nnfv1alpha3.NnfNode{ + r.Events <- event.GenericEvent{Object: &nnfv1alpha4.NnfNode{ ObjectMeta: metav1.ObjectMeta{ Name: r.NamespacedName.Name, Namespace: r.NamespacedName.Namespace, @@ -235,7 +235,7 @@ func (r *NnfNodeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re metrics.NnfNodeReconcilesTotal.Inc() - node := &nnfv1alpha3.NnfNode{} + node := &nnfv1alpha4.NnfNode{} if err := r.Get(ctx, req.NamespacedName, node); err != nil { // ignore not-found errors, since they can't be fixed by an immediate // requeue (we'll need to wait for a new notification), and we can get them @@ -244,7 +244,7 @@ func (r *NnfNodeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re } // Prepare to update the node's status - statusUpdater := updater.NewStatusUpdater[*nnfv1alpha3.NnfNodeStatus](node) + statusUpdater := updater.NewStatusUpdater[*nnfv1alpha4.NnfNodeStatus](node) defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() // Access the default storage service running in the NNF Element @@ -257,8 +257,8 @@ func (r *NnfNodeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re return ctrl.Result{}, err } - node.Status.Status = nnfv1alpha3.ResourceStatus(storageService.Status) - node.Status.Health = nnfv1alpha3.ResourceHealth(storageService.Status) + node.Status.Status = nnfv1alpha4.ResourceStatus(storageService.Status) + node.Status.Health = nnfv1alpha4.ResourceHealth(storageService.Status) if storageService.Status.State != sf.ENABLED_RST { return ctrl.Result{RequeueAfter: 1 * time.Second}, nil @@ -348,26 +348,26 @@ func (r *NnfNodeReconciler) createNamespace() *corev1.Namespace { } } -func (r *NnfNodeReconciler) createNode() *nnfv1alpha3.NnfNode { - return &nnfv1alpha3.NnfNode{ +func (r *NnfNodeReconciler) createNode() *nnfv1alpha4.NnfNode { + return &nnfv1alpha4.NnfNode{ ObjectMeta: metav1.ObjectMeta{ Name: r.Name, Namespace: r.Namespace, }, - Spec: nnfv1alpha3.NnfNodeSpec{ + Spec: 
nnfv1alpha4.NnfNodeSpec{ Name: r.Namespace, // Note the conversion here from namespace to name, each NNF Node is given a unique namespace, which then becomes how the NLC is controlled. Pod: os.Getenv("NNF_POD_NAME"), // Providing the podname gives users quick means to query the pod for a particular NNF Node - State: nnfv1alpha3.ResourceEnable, + State: nnfv1alpha4.ResourceEnable, }, - Status: nnfv1alpha3.NnfNodeStatus{ - Status: nnfv1alpha3.ResourceStarting, + Status: nnfv1alpha4.NnfNodeStatus{ + Status: nnfv1alpha4.ResourceStarting, Capacity: 0, }, } } // Update the Servers status of the NNF Node if necessary -func (r *NnfNodeReconciler) updateServers(node *nnfv1alpha3.NnfNode, log logr.Logger) error { +func (r *NnfNodeReconciler) updateServers(node *nnfv1alpha4.NnfNode, log logr.Logger) error { ss := nnf.NewDefaultStorageService(r.Options.DeleteUnknownVolumes()) @@ -379,7 +379,7 @@ func (r *NnfNodeReconciler) updateServers(node *nnfv1alpha3.NnfNode, log logr.Lo } if len(node.Status.Servers) < len(serverEndpointCollection.Members) { - node.Status.Servers = make([]nnfv1alpha3.NnfServerStatus, len(serverEndpointCollection.Members)) + node.Status.Servers = make([]nnfv1alpha4.NnfServerStatus, len(serverEndpointCollection.Members)) } // Iterate over the server endpoints to ensure we've reflected @@ -393,11 +393,11 @@ func (r *NnfNodeReconciler) updateServers(node *nnfv1alpha3.NnfNode, log logr.Lo return err } - node.Status.Servers[idx].NnfResourceStatus = nnfv1alpha3.NnfResourceStatus{ + node.Status.Servers[idx].NnfResourceStatus = nnfv1alpha4.NnfResourceStatus{ ID: serverEndpoint.Id, Name: serverEndpoint.Name, - Status: nnfv1alpha3.ResourceStatus(serverEndpoint.Status), - Health: nnfv1alpha3.ResourceHealth(serverEndpoint.Status), + Status: nnfv1alpha4.ResourceStatus(serverEndpoint.Status), + Health: nnfv1alpha4.ResourceHealth(serverEndpoint.Status), } } @@ -405,7 +405,7 @@ func (r *NnfNodeReconciler) updateServers(node *nnfv1alpha3.NnfNode, log logr.Lo } // Update the 
Drives status of the NNF Node if necessary -func updateDrives(node *nnfv1alpha3.NnfNode, log logr.Logger) error { +func updateDrives(node *nnfv1alpha4.NnfNode, log logr.Logger) error { storageService := nvme.NewDefaultStorageService() storageCollection := &sf.StorageCollectionStorageCollection{} @@ -415,7 +415,7 @@ func updateDrives(node *nnfv1alpha3.NnfNode, log logr.Logger) error { } if len(node.Status.Drives) < len(storageCollection.Members) { - node.Status.Drives = make([]nnfv1alpha3.NnfDriveStatus, len(storageCollection.Members)) + node.Status.Drives = make([]nnfv1alpha4.NnfDriveStatus, len(storageCollection.Members)) } // Iterate over the storage devices and controllers to ensure we've reflected @@ -431,11 +431,11 @@ func updateDrives(node *nnfv1alpha3.NnfNode, log logr.Logger) error { } drive.Slot = fmt.Sprintf("%d", storage.Location.PartLocation.LocationOrdinalValue) - drive.NnfResourceStatus = nnfv1alpha3.NnfResourceStatus{ + drive.NnfResourceStatus = nnfv1alpha4.NnfResourceStatus{ ID: storage.Id, Name: storage.Name, - Status: nnfv1alpha3.ResourceStatus(storage.Status), - Health: nnfv1alpha3.ResourceHealth(storage.Status), + Status: nnfv1alpha4.ResourceStatus(storage.Status), + Health: nnfv1alpha4.ResourceHealth(storage.Status), } if storage.Status.State == sf.ENABLED_RST { @@ -499,7 +499,7 @@ func (r *NnfNodeReconciler) SetupWithManager(mgr ctrl.Manager) error { // There can be only one NnfNode resource for this controller to // manage, so we don't set MaxConcurrentReconciles. return ctrl.NewControllerManagedBy(mgr). - For(&nnfv1alpha3.NnfNode{}). + For(&nnfv1alpha4.NnfNode{}). Owns(&corev1.Namespace{}). // The node will create a namespace for itself, so it can watch changes to the NNF Node custom resource Watches(&dwsv1alpha2.SystemConfiguration{}, handler.EnqueueRequestsFromMapFunc(systemConfigurationMapFunc)). WatchesRawSource(&source.Channel{Source: r.Events}, &handler.EnqueueRequestForObject{}). 
diff --git a/internal/controller/nnf_node_ec_data_controller.go b/internal/controller/nnf_node_ec_data_controller.go index 6708f963..f4097960 100644 --- a/internal/controller/nnf_node_ec_data_controller.go +++ b/internal/controller/nnf_node_ec_data_controller.go @@ -34,7 +34,7 @@ import ( nnfec "github.com/NearNodeFlash/nnf-ec/pkg" ec "github.com/NearNodeFlash/nnf-ec/pkg/ec" "github.com/NearNodeFlash/nnf-ec/pkg/persistent" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" "github.com/NearNodeFlash/nnf-sos/pkg/blockdevice" "github.com/go-logr/logr" @@ -71,14 +71,14 @@ func (r *NnfNodeECDataReconciler) Start(ctx context.Context) error { if !testing { // Create the resource if necessary - data := nnfv1alpha3.NnfNodeECData{} + data := nnfv1alpha4.NnfNodeECData{} if err := r.Get(ctx, r.NamespacedName, &data); err != nil { if !errors.IsNotFound(err) { return err } - data := nnfv1alpha3.NnfNodeECData{ + data := nnfv1alpha4.NnfNodeECData{ ObjectMeta: metav1.ObjectMeta{ Name: r.Name, Namespace: r.Namespace, @@ -172,7 +172,7 @@ func (*crdPersistentStorageInterface) Close() error { } func (psi *crdPersistentStorageInterface) View(fn func(persistent.PersistentStorageTransactionApi) error) error { - data := nnfv1alpha3.NnfNodeECData{} + data := nnfv1alpha4.NnfNodeECData{} if err := psi.reconciler.Get(context.TODO(), psi.reconciler.NamespacedName, &data); err != nil { return err } @@ -184,17 +184,17 @@ func (psi *crdPersistentStorageInterface) Update(fn func(persistent.PersistentSt Retry: - data := nnfv1alpha3.NnfNodeECData{} + data := nnfv1alpha4.NnfNodeECData{} if err := psi.reconciler.Get(context.TODO(), psi.reconciler.NamespacedName, &data); err != nil { return err } if data.Status.Data == nil { - data.Status.Data = make(map[string]nnfv1alpha3.NnfNodeECPrivateData) + data.Status.Data = make(map[string]nnfv1alpha4.NnfNodeECPrivateData) } if 
_, found := data.Status.Data[psi.name]; !found { - data.Status.Data[psi.name] = make(nnfv1alpha3.NnfNodeECPrivateData) + data.Status.Data[psi.name] = make(nnfv1alpha4.NnfNodeECPrivateData) } if err := fn(persistent.NewBase64PersistentStorageTransaction(data.Status.Data[psi.name])); err != nil { @@ -216,7 +216,7 @@ func (psi *crdPersistentStorageInterface) Delete(key string) error { Retry: - data := nnfv1alpha3.NnfNodeECData{} + data := nnfv1alpha4.NnfNodeECData{} if err := psi.reconciler.Get(context.TODO(), psi.reconciler.NamespacedName, &data); err != nil { return err } @@ -241,6 +241,6 @@ func (r *NnfNodeECDataReconciler) SetupWithManager(mgr ctrl.Manager) error { } return ctrl.NewControllerManagedBy(mgr). - For(&nnfv1alpha3.NnfNodeECData{}). + For(&nnfv1alpha4.NnfNodeECData{}). Complete(r) } diff --git a/internal/controller/nnf_node_storage_controller.go b/internal/controller/nnf_node_storage_controller.go index 94dc8dc2..e9f27ca2 100644 --- a/internal/controller/nnf_node_storage_controller.go +++ b/internal/controller/nnf_node_storage_controller.go @@ -39,7 +39,7 @@ import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/updater" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" ) @@ -109,7 +109,7 @@ func (r *NnfNodeStorageReconciler) Reconcile(ctx context.Context, req ctrl.Reque metrics.NnfNodeStorageReconcilesTotal.Inc() - nnfNodeStorage := &nnfv1alpha3.NnfNodeStorage{} + nnfNodeStorage := &nnfv1alpha4.NnfNodeStorage{} if err := r.Get(ctx, req.NamespacedName, nnfNodeStorage); err != nil { // ignore not-found errors, since they can't be fixed by an immediate // requeue (we'll need to wait for a new notification), and we can get them @@ -125,7 +125,7 @@ func (r *NnfNodeStorageReconciler) Reconcile(ctx context.Context, req ctrl.Reque // so when we would normally call 
"return ctrl.Result{}, nil", at that time // "err" is nil - and if permitted we will update err with the result of // the r.Update() - statusUpdater := updater.NewStatusUpdater[*nnfv1alpha3.NnfNodeStorageStatus](nnfNodeStorage) + statusUpdater := updater.NewStatusUpdater[*nnfv1alpha4.NnfNodeStorageStatus](nnfNodeStorage) defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() defer func() { nnfNodeStorage.Status.SetResourceErrorAndLog(err, log) }() @@ -184,7 +184,7 @@ func (r *NnfNodeStorageReconciler) Reconcile(ctx context.Context, req ctrl.Reque // Initialize the status section with empty allocation statuses. if len(nnfNodeStorage.Status.Allocations) == 0 { - nnfNodeStorage.Status.Allocations = make([]nnfv1alpha3.NnfNodeStorageAllocationStatus, nnfNodeStorage.Spec.Count) + nnfNodeStorage.Status.Allocations = make([]nnfv1alpha4.NnfNodeStorageAllocationStatus, nnfNodeStorage.Spec.Count) for i := range nnfNodeStorage.Status.Allocations { nnfNodeStorage.Status.Allocations[i].Ready = false } @@ -228,7 +228,7 @@ func (r *NnfNodeStorageReconciler) Reconcile(ctx context.Context, req ctrl.Reque return ctrl.Result{}, nil } -func (r *NnfNodeStorageReconciler) deleteAllocation(ctx context.Context, nnfNodeStorage *nnfv1alpha3.NnfNodeStorage, index int) (*ctrl.Result, error) { +func (r *NnfNodeStorageReconciler) deleteAllocation(ctx context.Context, nnfNodeStorage *nnfv1alpha4.NnfNodeStorage, index int) (*ctrl.Result, error) { log := r.Log.WithValues("NnfNodeStorage", client.ObjectKeyFromObject(nnfNodeStorage), "index", index) blockDevice, fileSystem, err := getBlockDeviceAndFileSystem(ctx, r.Client, nnfNodeStorage, index, log) @@ -303,7 +303,7 @@ func (r *NnfNodeStorageReconciler) deleteAllocation(ctx context.Context, nnfNode return nil, nil } -func (r *NnfNodeStorageReconciler) createAllocations(ctx context.Context, nnfNodeStorage *nnfv1alpha3.NnfNodeStorage, blockDevices []blockdevice.BlockDevice, fileSystems []filesystem.FileSystem) 
(*ctrl.Result, error) { +func (r *NnfNodeStorageReconciler) createAllocations(ctx context.Context, nnfNodeStorage *nnfv1alpha4.NnfNodeStorage, blockDevices []blockdevice.BlockDevice, fileSystems []filesystem.FileSystem) (*ctrl.Result, error) { log := r.Log.WithValues("NnfNodeStorage", client.ObjectKeyFromObject(nnfNodeStorage)) for index, blockDevice := range blockDevices { @@ -377,6 +377,6 @@ func (r *NnfNodeStorageReconciler) SetupWithManager(mgr ctrl.Manager) error { maxReconciles := runtime.GOMAXPROCS(0) return ctrl.NewControllerManagedBy(mgr). WithOptions(controller.Options{MaxConcurrentReconciles: maxReconciles}). - For(&nnfv1alpha3.NnfNodeStorage{}). + For(&nnfv1alpha4.NnfNodeStorage{}). Complete(r) } diff --git a/internal/controller/nnf_node_storage_controller_test.go b/internal/controller/nnf_node_storage_controller_test.go index 87a973ab..366ead0d 100644 --- a/internal/controller/nnf_node_storage_controller_test.go +++ b/internal/controller/nnf_node_storage_controller_test.go @@ -31,13 +31,13 @@ import ( "k8s.io/apimachinery/pkg/types" nnf "github.com/NearNodeFlash/nnf-ec/pkg" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" ) var _ = PDescribe("NNF Node Storage Controller Test", func() { var ( key types.NamespacedName - storage *nnfv1alpha3.NnfNodeStorage + storage *nnfv1alpha4.NnfNodeStorage ) BeforeEach(func() { @@ -55,12 +55,12 @@ var _ = PDescribe("NNF Node Storage Controller Test", func() { Namespace: corev1.NamespaceDefault, } - storage = &nnfv1alpha3.NnfNodeStorage{ + storage = &nnfv1alpha4.NnfNodeStorage{ ObjectMeta: metav1.ObjectMeta{ Name: key.Name, Namespace: key.Namespace, }, - Spec: nnfv1alpha3.NnfNodeStorageSpec{ + Spec: nnfv1alpha4.NnfNodeStorageSpec{ Count: 1, }, } @@ -70,13 +70,13 @@ var _ = PDescribe("NNF Node Storage Controller Test", func() { Expect(k8sClient.Create(context.TODO(), storage)).To(Succeed()) Eventually(func() error { - expected := 
&nnfv1alpha3.NnfNodeStorage{} + expected := &nnfv1alpha4.NnfNodeStorage{} return k8sClient.Get(context.TODO(), key, expected) }, "3s", "1s").Should(Succeed(), "expected return after create. key: "+key.String()) }) AfterEach(func() { - expected := &nnfv1alpha3.NnfNodeStorage{} + expected := &nnfv1alpha4.NnfNodeStorage{} Expect(k8sClient.Get(context.TODO(), key, expected)).To(Succeed()) Expect(k8sClient.Delete(context.TODO(), expected)).To(Succeed()) }) @@ -87,7 +87,7 @@ var _ = PDescribe("NNF Node Storage Controller Test", func() { }) It("is successful", func() { - expected := &nnfv1alpha3.NnfNodeStorage{} + expected := &nnfv1alpha4.NnfNodeStorage{} Expect(k8sClient.Get(context.TODO(), key, expected)).To(Succeed()) }) }) @@ -96,7 +96,7 @@ var _ = PDescribe("NNF Node Storage Controller Test", func() { BeforeEach(func() { storage.Spec.FileSystemType = "lustre" - storage.Spec.LustreStorage = nnfv1alpha3.LustreStorageSpec{ + storage.Spec.LustreStorage = nnfv1alpha4.LustreStorageSpec{ FileSystemName: "test", StartIndex: 0, MgsAddress: "test", @@ -106,7 +106,7 @@ var _ = PDescribe("NNF Node Storage Controller Test", func() { }) It("is successful", func() { - expected := &nnfv1alpha3.NnfNodeStorage{} + expected := &nnfv1alpha4.NnfNodeStorage{} Expect(k8sClient.Get(context.TODO(), key, expected)).To(Succeed()) }) }) diff --git a/internal/controller/nnf_persistentstorageinstance_controller.go b/internal/controller/nnf_persistentstorageinstance_controller.go index 421da5ce..9229d07e 100644 --- a/internal/controller/nnf_persistentstorageinstance_controller.go +++ b/internal/controller/nnf_persistentstorageinstance_controller.go @@ -38,7 +38,7 @@ import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/dwdparse" "github.com/DataWorkflowServices/dws/utils/updater" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" 
"github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" ) @@ -153,8 +153,8 @@ func (r *PersistentStorageReconciler) Reconcile(ctx context.Context, req ctrl.Re return ctrl.Result{}, dwsv1alpha2.NewResourceError("").WithUserMessage("creating persistent MGT does not accept 'capacity' argument").WithFatal().WithUser() } labels := persistentStorage.GetLabels() - if _, ok := labels[nnfv1alpha3.StandaloneMGTLabel]; !ok { - labels[nnfv1alpha3.StandaloneMGTLabel] = pinnedProfile.Data.LustreStorage.StandaloneMGTPoolName + if _, ok := labels[nnfv1alpha4.StandaloneMGTLabel]; !ok { + labels[nnfv1alpha4.StandaloneMGTLabel] = pinnedProfile.Data.LustreStorage.StandaloneMGTPoolName persistentStorage.SetLabels(labels) if err := r.Update(ctx, persistentStorage); err != nil { if !apierrors.IsConflict(err) { @@ -193,7 +193,7 @@ func (r *PersistentStorageReconciler) Reconcile(ctx context.Context, req ctrl.Re } else if persistentStorage.Spec.State == dwsv1alpha2.PSIStateActive { // Wait for the NnfStorage to be ready before marking the persistent storage // state as "active" - nnfStorage := &nnfv1alpha3.NnfStorage{} + nnfStorage := &nnfv1alpha4.NnfStorage{} if err := r.Get(ctx, req.NamespacedName, nnfStorage); err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } @@ -261,9 +261,9 @@ func (r *PersistentStorageReconciler) createServers(ctx context.Context, persist // SetupWithManager sets up the controller with the Manager. func (r *PersistentStorageReconciler) SetupWithManager(mgr ctrl.Manager) error { r.ChildObjects = []dwsv1alpha2.ObjectList{ - &nnfv1alpha3.NnfStorageList{}, + &nnfv1alpha4.NnfStorageList{}, &dwsv1alpha2.ServersList{}, - &nnfv1alpha3.NnfStorageProfileList{}, + &nnfv1alpha4.NnfStorageProfileList{}, } maxReconciles := runtime.GOMAXPROCS(0) @@ -271,7 +271,7 @@ func (r *PersistentStorageReconciler) SetupWithManager(mgr ctrl.Manager) error { WithOptions(controller.Options{MaxConcurrentReconciles: maxReconciles}). For(&dwsv1alpha2.PersistentStorageInstance{}). 
Owns(&dwsv1alpha2.Servers{}). - Owns(&nnfv1alpha3.NnfStorage{}). - Owns(&nnfv1alpha3.NnfStorageProfile{}). + Owns(&nnfv1alpha4.NnfStorage{}). + Owns(&nnfv1alpha4.NnfStorageProfile{}). Complete(r) } diff --git a/internal/controller/nnf_persistentstorageinstance_controller_test.go b/internal/controller/nnf_persistentstorageinstance_controller_test.go index 7d3e922d..cb79fc40 100644 --- a/internal/controller/nnf_persistentstorageinstance_controller_test.go +++ b/internal/controller/nnf_persistentstorageinstance_controller_test.go @@ -30,12 +30,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" ) var _ = Describe("PersistentStorage test", func() { var ( - storageProfile *nnfv1alpha3.NnfStorageProfile + storageProfile *nnfv1alpha4.NnfStorageProfile ) BeforeEach(func() { @@ -45,7 +45,7 @@ var _ = Describe("PersistentStorage test", func() { AfterEach(func() { Expect(k8sClient.Delete(context.TODO(), storageProfile)).To(Succeed()) - profExpected := &nnfv1alpha3.NnfStorageProfile{} + profExpected := &nnfv1alpha4.NnfStorageProfile{} Eventually(func() error { // Delete can still return the cached object. 
Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storageProfile), profExpected) }).ShouldNot(Succeed()) @@ -83,7 +83,7 @@ var _ = Describe("PersistentStorage test", func() { return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(servers), servers) }).Should(Succeed(), "Create the DWS Servers Resource") - pinnedStorageProfile := &nnfv1alpha3.NnfStorageProfile{ + pinnedStorageProfile := &nnfv1alpha4.NnfStorageProfile{ ObjectMeta: metav1.ObjectMeta{ Name: persistentStorage.GetName(), Namespace: persistentStorage.GetNamespace(), diff --git a/internal/controller/nnf_port_manager_controller.go b/internal/controller/nnf_port_manager_controller.go index f31333a8..e71af5e3 100644 --- a/internal/controller/nnf_port_manager_controller.go +++ b/internal/controller/nnf_port_manager_controller.go @@ -36,7 +36,7 @@ import ( "github.com/go-logr/logr" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" ) // NnfPortManagerReconciler reconciles a NnfPortManager object @@ -46,8 +46,8 @@ type NnfPortManagerReconciler struct { } // type aliases for name shortening -type AllocationSpec = nnfv1alpha3.NnfPortManagerAllocationSpec -type AllocationStatus = nnfv1alpha3.NnfPortManagerAllocationStatus +type AllocationSpec = nnfv1alpha4.NnfPortManagerAllocationSpec +type AllocationStatus = nnfv1alpha4.NnfPortManagerAllocationStatus //+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfportmanagers,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfportmanagers/status,verbs=get;update;patch @@ -65,13 +65,13 @@ func (r *NnfPortManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque log := log.FromContext(ctx) unsatisfiedRequests := 0 - mgr := &nnfv1alpha3.NnfPortManager{} + mgr := &nnfv1alpha4.NnfPortManager{} if err := r.Get(ctx, 
req.NamespacedName, mgr); err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } // Create a resource status updater to ensure the status subresource is updated. - statusUpdater := updater.NewStatusUpdater[*nnfv1alpha3.NnfPortManagerStatus](mgr) + statusUpdater := updater.NewStatusUpdater[*nnfv1alpha4.NnfPortManagerStatus](mgr) defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() // Read in the system configuration which contains the available ports. @@ -82,14 +82,14 @@ func (r *NnfPortManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque }, } - mgr.Status.Status = nnfv1alpha3.NnfPortManagerStatusReady + mgr.Status.Status = nnfv1alpha4.NnfPortManagerStatusReady if err := r.Get(ctx, client.ObjectKeyFromObject(config), config); err != nil { if !errors.IsNotFound(err) { return ctrl.Result{}, err } log.Info("System Configuration not found", "config", client.ObjectKeyFromObject(config).String()) - mgr.Status.Status = nnfv1alpha3.NnfPortManagerStatusSystemConfigurationNotFound + mgr.Status.Status = nnfv1alpha4.NnfPortManagerStatusSystemConfigurationNotFound res = ctrl.Result{Requeue: true} // Force a requeue - we want the manager to go ready even if there are zero allocations } @@ -100,18 +100,18 @@ func (r *NnfPortManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque // allocating the desired ports. 
for _, spec := range mgr.Spec.Allocations { var ports []uint16 - var status nnfv1alpha3.NnfPortManagerAllocationStatusStatus - var allocationStatus *nnfv1alpha3.NnfPortManagerAllocationStatus + var status nnfv1alpha4.NnfPortManagerAllocationStatusStatus + var allocationStatus *nnfv1alpha4.NnfPortManagerAllocationStatus // If the specification is already included in the allocations and InUse, continue allocationStatus = r.findAllocationStatus(mgr, spec) - if allocationStatus != nil && allocationStatus.Status == nnfv1alpha3.NnfPortManagerAllocationStatusInUse { + if allocationStatus != nil && allocationStatus.Status == nnfv1alpha4.NnfPortManagerAllocationStatusInUse { continue } // Determine if the port manager is ready and find a free port - if mgr.Status.Status != nnfv1alpha3.NnfPortManagerStatusReady { - ports, status = nil, nnfv1alpha3.NnfPortManagerAllocationStatusInvalidConfiguration + if mgr.Status.Status != nnfv1alpha4.NnfPortManagerStatusReady { + ports, status = nil, nnfv1alpha4.NnfPortManagerAllocationStatusInvalidConfiguration } else { ports, status = r.findFreePorts(log, mgr, config, spec) } @@ -119,7 +119,7 @@ func (r *NnfPortManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque log.Info("Allocation", "requester", spec.Requester, "count", spec.Count, "ports", ports, "status", status) // Port could not be allocated - try again next time - if status != nnfv1alpha3.NnfPortManagerAllocationStatusInUse { + if status != nnfv1alpha4.NnfPortManagerAllocationStatusInUse { unsatisfiedRequests++ log.Info("Allocation unsatisfied", "requester", spec.Requester, "count", spec.Count, "ports", ports, "status", status) } @@ -135,7 +135,7 @@ func (r *NnfPortManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque spec.Requester.DeepCopyInto(allocationStatus.Requester) if mgr.Status.Allocations == nil { - mgr.Status.Allocations = make([]nnfv1alpha3.NnfPortManagerAllocationStatus, 0) + mgr.Status.Allocations = 
make([]nnfv1alpha4.NnfPortManagerAllocationStatus, 0) } mgr.Status.Allocations = append(mgr.Status.Allocations, allocationStatus) @@ -158,8 +158,8 @@ func (r *NnfPortManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque // isAllocationNeeded returns true if the provided Port Allocation Status has a matching value // requester in the specification, and false otherwise. -func (r *NnfPortManagerReconciler) isAllocationNeeded(mgr *nnfv1alpha3.NnfPortManager, status *AllocationStatus) bool { - if status.Status != nnfv1alpha3.NnfPortManagerAllocationStatusInUse && status.Status != nnfv1alpha3.NnfPortManagerAllocationStatusInsufficientResources { +func (r *NnfPortManagerReconciler) isAllocationNeeded(mgr *nnfv1alpha4.NnfPortManager, status *AllocationStatus) bool { + if status.Status != nnfv1alpha4.NnfPortManagerAllocationStatusInUse && status.Status != nnfv1alpha4.NnfPortManagerAllocationStatusInsufficientResources { return false } @@ -176,7 +176,7 @@ func (r *NnfPortManagerReconciler) isAllocationNeeded(mgr *nnfv1alpha3.NnfPortMa return false } -func (r *NnfPortManagerReconciler) cleanupUnusedAllocations(log logr.Logger, mgr *nnfv1alpha3.NnfPortManager, cooldown int) { +func (r *NnfPortManagerReconciler) cleanupUnusedAllocations(log logr.Logger, mgr *nnfv1alpha4.NnfPortManager, cooldown int) { // Free unused allocations. 
This will check if the Status.Allocations exist in // the list of desired allocations in the Spec field and mark any unused allocations @@ -193,7 +193,7 @@ func (r *NnfPortManagerReconciler) cleanupUnusedAllocations(log logr.Logger, mgr if cooldown == 0 { allocsToRemove = append(allocsToRemove, idx) log.Info("Allocation unused - removing", "requester", status.Requester, "status", status.Status) - } else if status.Status == nnfv1alpha3.NnfPortManagerAllocationStatusCooldown { + } else if status.Status == nnfv1alpha4.NnfPortManagerAllocationStatusCooldown { period := now.Sub(status.TimeUnallocated.Time) log.Info("Allocation unused - checking cooldown", "requester", status.Requester, "status", status.Status, "period", period, "time", status.TimeUnallocated.String()) if period >= time.Duration(cooldown)*time.Second { @@ -202,7 +202,7 @@ func (r *NnfPortManagerReconciler) cleanupUnusedAllocations(log logr.Logger, mgr } } else if status.TimeUnallocated == nil { status.TimeUnallocated = &now - status.Status = nnfv1alpha3.NnfPortManagerAllocationStatusCooldown + status.Status = nnfv1alpha4.NnfPortManagerAllocationStatusCooldown log.Info("Allocation unused -- cooldown set", "requester", status.Requester, "status", status.Status) } } @@ -214,7 +214,7 @@ func (r *NnfPortManagerReconciler) cleanupUnusedAllocations(log logr.Logger, mgr } } -func (r *NnfPortManagerReconciler) findAllocationStatus(mgr *nnfv1alpha3.NnfPortManager, spec AllocationSpec) *AllocationStatus { +func (r *NnfPortManagerReconciler) findAllocationStatus(mgr *nnfv1alpha4.NnfPortManager, spec AllocationSpec) *AllocationStatus { for idx := range mgr.Status.Allocations { status := &mgr.Status.Allocations[idx] if status.Requester == nil { @@ -231,17 +231,17 @@ func (r *NnfPortManagerReconciler) findAllocationStatus(mgr *nnfv1alpha3.NnfPort // isAllocated returns true if the provided specification is in the Port Manager's allocation // status', and false otherwise. 
-func (r *NnfPortManagerReconciler) isAllocated(mgr *nnfv1alpha3.NnfPortManager, spec AllocationSpec) bool { +func (r *NnfPortManagerReconciler) isAllocated(mgr *nnfv1alpha4.NnfPortManager, spec AllocationSpec) bool { return r.findAllocationStatus(mgr, spec) != nil } // Find free ports to satisfy the provided specification. -func (r *NnfPortManagerReconciler) findFreePorts(log logr.Logger, mgr *nnfv1alpha3.NnfPortManager, config *dwsv1alpha2.SystemConfiguration, spec AllocationSpec) ([]uint16, nnfv1alpha3.NnfPortManagerAllocationStatusStatus) { +func (r *NnfPortManagerReconciler) findFreePorts(log logr.Logger, mgr *nnfv1alpha4.NnfPortManager, config *dwsv1alpha2.SystemConfiguration, spec AllocationSpec) ([]uint16, nnfv1alpha4.NnfPortManagerAllocationStatusStatus) { portsInUse := make([]uint16, 0) for _, status := range mgr.Status.Allocations { - if status.Status == nnfv1alpha3.NnfPortManagerAllocationStatusInUse || - status.Status == nnfv1alpha3.NnfPortManagerAllocationStatusCooldown { + if status.Status == nnfv1alpha4.NnfPortManagerAllocationStatusInUse || + status.Status == nnfv1alpha4.NnfPortManagerAllocationStatusCooldown { portsInUse = append(portsInUse, status.Ports...) 
} } @@ -276,7 +276,7 @@ func (r *NnfPortManagerReconciler) findFreePorts(log logr.Logger, mgr *nnfv1alph if len(ports) >= count { log.Info("Ports claimed from system configuration", "ports", ports) - return ports[:count], nnfv1alpha3.NnfPortManagerAllocationStatusInUse + return ports[:count], nnfv1alpha4.NnfPortManagerAllocationStatusInUse } // If we still haven't found a sufficient number of free ports, free up unused allocations @@ -293,7 +293,7 @@ func (r *NnfPortManagerReconciler) findFreePorts(log logr.Logger, mgr *nnfv1alph for idx := range mgr.Status.Allocations { status := &mgr.Status.Allocations[idx] - if status.Status == nnfv1alpha3.NnfPortManagerAllocationStatusFree { + if status.Status == nnfv1alpha4.NnfPortManagerAllocationStatusFree { log.Info("Ports claimed from free list", "ports", status.Ports) // Append this values ports to the returned ports. We could over-allocate here, but @@ -314,18 +314,18 @@ func (r *NnfPortManagerReconciler) findFreePorts(log logr.Logger, mgr *nnfv1alph for len(ports) < count { switch claimPortsFromFreeAllocation() { case exhausted: - return []uint16{}, nnfv1alpha3.NnfPortManagerAllocationStatusInsufficientResources + return []uint16{}, nnfv1alpha4.NnfPortManagerAllocationStatusInsufficientResources case more: // loop again if needed } } - return ports[:count], nnfv1alpha3.NnfPortManagerAllocationStatusInUse + return ports[:count], nnfv1alpha4.NnfPortManagerAllocationStatusInUse } // SetupWithManager sets up the controller with the Manager. func (r *NnfPortManagerReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&nnfv1alpha3.NnfPortManager{}). + For(&nnfv1alpha4.NnfPortManager{}). 
Complete(r) } diff --git a/internal/controller/nnf_port_manager_controller_test.go b/internal/controller/nnf_port_manager_controller_test.go index c5508fa3..31135d02 100644 --- a/internal/controller/nnf_port_manager_controller_test.go +++ b/internal/controller/nnf_port_manager_controller_test.go @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" ) var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { @@ -47,7 +47,7 @@ var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { Describe("NNF Port Manager Controller Test", func() { var cfg *dwsv1alpha2.SystemConfiguration - var mgr *nnfv1alpha3.NnfPortManager + var mgr *nnfv1alpha4.NnfPortManager portCooldown := 1 JustBeforeEach(func() { @@ -73,18 +73,18 @@ var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { } }) - mgr = &nnfv1alpha3.NnfPortManager{ + mgr = &nnfv1alpha4.NnfPortManager{ ObjectMeta: metav1.ObjectMeta{ Name: "nnf-port-manager", Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha3.NnfPortManagerSpec{ + Spec: nnfv1alpha4.NnfPortManagerSpec{ SystemConfiguration: corev1.ObjectReference{ Name: cfg.Name, Namespace: cfg.Namespace, Kind: reflect.TypeOf(*cfg).Name(), }, - Allocations: make([]nnfv1alpha3.NnfPortManagerAllocationSpec, 0), + Allocations: make([]nnfv1alpha4.NnfPortManagerAllocationSpec, 0), }, } Expect(k8sClient.Create(ctx, mgr)).To(Succeed()) @@ -103,10 +103,10 @@ var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { // Submit an allocation and verify it has been accounted for - this doesn't mean the ports // were successfully allocated, however. 
- allocatePorts := func(mgr *nnfv1alpha3.NnfPortManager, name string, count int) []uint16 { + allocatePorts := func(mgr *nnfv1alpha4.NnfPortManager, name string, count int) []uint16 { By(fmt.Sprintf("Reserving %d ports for '%s'", count, name)) - allocation := nnfv1alpha3.NnfPortManagerAllocationSpec{ + allocation := nnfv1alpha4.NnfPortManagerAllocationSpec{ Requester: corev1.ObjectReference{Name: name}, Count: count, } @@ -129,10 +129,10 @@ var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { } // Submit an allocation and expect it to be successfully allocated (i.e. ports InUse) - reservePorts := func(mgr *nnfv1alpha3.NnfPortManager, name string, count int) []uint16 { + reservePorts := func(mgr *nnfv1alpha4.NnfPortManager, name string, count int) []uint16 { ports := allocatePorts(mgr, name, count) - allocation := nnfv1alpha3.NnfPortManagerAllocationSpec{ + allocation := nnfv1alpha4.NnfPortManagerAllocationSpec{ Requester: corev1.ObjectReference{Name: name}, Count: count, } @@ -140,16 +140,16 @@ var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { status := r.findAllocationStatus(mgr, allocation) Expect(status).ToNot(BeNil()) Expect(status.Ports).To(HaveLen(allocation.Count)) - Expect(status.Status).To(Equal(nnfv1alpha3.NnfPortManagerAllocationStatusInUse)) + Expect(status.Status).To(Equal(nnfv1alpha4.NnfPortManagerAllocationStatusInUse)) return ports } - reservePortsAllowFail := func(mgr *nnfv1alpha3.NnfPortManager, name string, count int) []uint16 { + reservePortsAllowFail := func(mgr *nnfv1alpha4.NnfPortManager, name string, count int) []uint16 { return allocatePorts(mgr, name, count) } - releasePorts := func(mgr *nnfv1alpha3.NnfPortManager, name string) { + releasePorts := func(mgr *nnfv1alpha4.NnfPortManager, name string) { By(fmt.Sprintf("Releasing ports for '%s'", name)) requester := corev1.ObjectReference{Name: name} @@ -170,7 +170,7 @@ var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { // Simple way to 
fire the reconciler to test the cooldown handling // without having to reserve new ports. This is just to limit the scope // of the test. - kickPortManager := func(mgr *nnfv1alpha3.NnfPortManager) { + kickPortManager := func(mgr *nnfv1alpha4.NnfPortManager) { By("Kicking port manager to force reconcile") Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(mgr), mgr)).To(Succeed()) @@ -183,7 +183,7 @@ var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { } // Verify the number of allocations in the status allocation list that are InUse - verifyNumAllocations := func(mgr *nnfv1alpha3.NnfPortManager, status nnfv1alpha3.NnfPortManagerAllocationStatusStatus, count int) { + verifyNumAllocations := func(mgr *nnfv1alpha4.NnfPortManager, status nnfv1alpha4.NnfPortManagerAllocationStatusStatus, count int) { By(fmt.Sprintf("Verifying there are %d allocations with Status %s in the status allocation list", count, status)) Eventually(func() int { @@ -198,16 +198,16 @@ var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { }).Should(Equal(count)) } - verifyNumAllocationsInUse := func(mgr *nnfv1alpha3.NnfPortManager, count int) { - verifyNumAllocations(mgr, nnfv1alpha3.NnfPortManagerAllocationStatusInUse, count) + verifyNumAllocationsInUse := func(mgr *nnfv1alpha4.NnfPortManager, count int) { + verifyNumAllocations(mgr, nnfv1alpha4.NnfPortManagerAllocationStatusInUse, count) } - verifyNumAllocationsCooldown := func(mgr *nnfv1alpha3.NnfPortManager, count int) { - verifyNumAllocations(mgr, nnfv1alpha3.NnfPortManagerAllocationStatusCooldown, count) + verifyNumAllocationsCooldown := func(mgr *nnfv1alpha4.NnfPortManager, count int) { + verifyNumAllocations(mgr, nnfv1alpha4.NnfPortManagerAllocationStatusCooldown, count) } - verifyNumAllocationsInsuffientResources := func(mgr *nnfv1alpha3.NnfPortManager, count int) { - verifyNumAllocations(mgr, nnfv1alpha3.NnfPortManagerAllocationStatusInsufficientResources, count) + 
verifyNumAllocationsInsuffientResources := func(mgr *nnfv1alpha4.NnfPortManager, count int) { + verifyNumAllocations(mgr, nnfv1alpha4.NnfPortManagerAllocationStatusInsufficientResources, count) } waitForCooldown := func(extra int) { @@ -226,10 +226,10 @@ var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { kickPortManager(mgr) - Eventually(func() nnfv1alpha3.NnfPortManagerStatusStatus { + Eventually(func() nnfv1alpha4.NnfPortManagerStatusStatus { k8sClient.Get(ctx, client.ObjectKeyFromObject(mgr), mgr) return mgr.Status.Status - }).Should(Equal(nnfv1alpha3.NnfPortManagerStatusSystemConfigurationNotFound)) + }).Should(Equal(nnfv1alpha4.NnfPortManagerStatusSystemConfigurationNotFound)) }) }) @@ -334,7 +334,7 @@ var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { const name = "all" reservePorts(mgr, name, portEnd-portStart+1) - allocation := nnfv1alpha3.NnfPortManagerAllocationSpec{ + allocation := nnfv1alpha4.NnfPortManagerAllocationSpec{ Requester: corev1.ObjectReference{Name: "insufficient-resources"}, Count: 1, } @@ -353,7 +353,7 @@ var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { status := r.findAllocationStatus(mgr, allocation) Expect(status).ToNot(BeNil()) Expect(status.Ports).To(BeEmpty()) - Expect(status.Status).To(Equal(nnfv1alpha3.NnfPortManagerAllocationStatusInsufficientResources)) + Expect(status.Status).To(Equal(nnfv1alpha4.NnfPortManagerAllocationStatusInsufficientResources)) }) }) @@ -388,12 +388,12 @@ var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { By("Attempting to reserve an additional port and failing") ports := reservePortsAllowFail(mgr, "waiting", 1) - allocation := nnfv1alpha3.NnfPortManagerAllocationSpec{Requester: corev1.ObjectReference{Name: "waiting"}, Count: 1} + allocation := nnfv1alpha4.NnfPortManagerAllocationSpec{Requester: corev1.ObjectReference{Name: "waiting"}, Count: 1} status := r.findAllocationStatus(mgr, allocation) Expect(ports).To(HaveLen(0)) 
Expect(status).ToNot(BeNil()) - Expect(status.Status).To(Equal(nnfv1alpha3.NnfPortManagerAllocationStatusInsufficientResources)) + Expect(status.Status).To(Equal(nnfv1alpha4.NnfPortManagerAllocationStatusInsufficientResources)) verifyNumAllocationsInUse(mgr, portTotal) verifyNumAllocationsInsuffientResources(mgr, 1) diff --git a/internal/controller/nnf_storage_controller.go b/internal/controller/nnf_storage_controller.go index 360ae828..05e805cb 100644 --- a/internal/controller/nnf_storage_controller.go +++ b/internal/controller/nnf_storage_controller.go @@ -44,7 +44,7 @@ import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/updater" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" ) @@ -99,7 +99,7 @@ func (r *NnfStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) log := r.Log.WithValues("NnfStorage", req.NamespacedName) metrics.NnfStorageReconcilesTotal.Inc() - storage := &nnfv1alpha3.NnfStorage{} + storage := &nnfv1alpha4.NnfStorage{} if err := r.Get(ctx, req.NamespacedName, storage); err != nil { // ignore not-found errors, since they can't be fixed by an immediate // requeue (we'll need to wait for a new notification), and we can get them @@ -110,7 +110,7 @@ func (r *NnfStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) // Create an updater for the entire node. This will handle calls to r.Status().Update() such // that we can repeatedly make calls to the internal update method, with the final update // occuring on the on function exit. 
- statusUpdater := updater.NewStatusUpdater[*nnfv1alpha3.NnfStorageStatus](storage) + statusUpdater := updater.NewStatusUpdater[*nnfv1alpha4.NnfStorageStatus](storage) defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() defer func() { storage.Status.SetResourceErrorAndLog(err, log) }() @@ -160,7 +160,7 @@ func (r *NnfStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) // Initialize the status section of the NnfStorage if it hasn't been done already. if len(storage.Status.AllocationSets) != len(storage.Spec.AllocationSets) { - storage.Status.AllocationSets = make([]nnfv1alpha3.NnfStorageAllocationSetStatus, len(storage.Spec.AllocationSets)) + storage.Status.AllocationSets = make([]nnfv1alpha4.NnfStorageAllocationSetStatus, len(storage.Spec.AllocationSets)) for i := range storage.Status.AllocationSets { storage.Status.AllocationSets[i].Ready = false } @@ -256,7 +256,7 @@ func (r *NnfStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, nil } -func (r *NnfStorageReconciler) addPersistentStorageReference(ctx context.Context, nnfStorage *nnfv1alpha3.NnfStorage, persistentMgsReference corev1.ObjectReference) error { +func (r *NnfStorageReconciler) addPersistentStorageReference(ctx context.Context, nnfStorage *nnfv1alpha4.NnfStorage, persistentMgsReference corev1.ObjectReference) error { persistentStorage := &dwsv1alpha2.PersistentStorageInstance{ ObjectMeta: metav1.ObjectMeta{ Name: persistentMgsReference.Name, @@ -276,7 +276,7 @@ func (r *NnfStorageReconciler) addPersistentStorageReference(ctx context.Context reference := corev1.ObjectReference{ Name: nnfStorage.Name, Namespace: nnfStorage.Namespace, - Kind: reflect.TypeOf(nnfv1alpha3.NnfStorage{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha4.NnfStorage{}).Name(), } for _, existingReference := range persistentStorage.Spec.ConsumerReferences { @@ -290,7 +290,7 @@ func (r *NnfStorageReconciler) addPersistentStorageReference(ctx 
context.Context return r.Update(ctx, persistentStorage) } -func (r *NnfStorageReconciler) removePersistentStorageReference(ctx context.Context, nnfStorage *nnfv1alpha3.NnfStorage, persistentMgsReference corev1.ObjectReference) error { +func (r *NnfStorageReconciler) removePersistentStorageReference(ctx context.Context, nnfStorage *nnfv1alpha4.NnfStorage, persistentMgsReference corev1.ObjectReference) error { persistentStorage := &dwsv1alpha2.PersistentStorageInstance{ ObjectMeta: metav1.ObjectMeta{ Name: persistentMgsReference.Name, @@ -306,7 +306,7 @@ func (r *NnfStorageReconciler) removePersistentStorageReference(ctx context.Cont reference := corev1.ObjectReference{ Name: nnfStorage.Name, Namespace: nnfStorage.Namespace, - Kind: reflect.TypeOf(nnfv1alpha3.NnfStorage{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha4.NnfStorage{}).Name(), } for i, existingReference := range persistentStorage.Spec.ConsumerReferences { @@ -319,7 +319,7 @@ func (r *NnfStorageReconciler) removePersistentStorageReference(ctx context.Cont return nil } -func (r *NnfStorageReconciler) createNodeBlockStorage(ctx context.Context, nnfStorage *nnfv1alpha3.NnfStorage, allocationSetIndex int) (*ctrl.Result, error) { +func (r *NnfStorageReconciler) createNodeBlockStorage(ctx context.Context, nnfStorage *nnfv1alpha4.NnfStorage, allocationSetIndex int) (*ctrl.Result, error) { log := r.Log.WithValues("NnfStorage", client.ObjectKeyFromObject(nnfStorage)) allocationSet := nnfStorage.Spec.AllocationSets[allocationSetIndex] @@ -327,7 +327,7 @@ func (r *NnfStorageReconciler) createNodeBlockStorage(ctx context.Context, nnfSt for i, node := range allocationSet.Nodes { // Per Rabbit namespace. 
- nnfNodeBlockStorage := &nnfv1alpha3.NnfNodeBlockStorage{ + nnfNodeBlockStorage := &nnfv1alpha4.NnfNodeBlockStorage{ ObjectMeta: metav1.ObjectMeta{ Name: nnfNodeStorageName(nnfStorage, allocationSetIndex, i), Namespace: node.Name, @@ -340,7 +340,7 @@ func (r *NnfStorageReconciler) createNodeBlockStorage(ctx context.Context, nnfSt dwsv1alpha2.AddOwnerLabels(nnfNodeBlockStorage, nnfStorage) labels := nnfNodeBlockStorage.GetLabels() - labels[nnfv1alpha3.AllocationSetLabel] = allocationSet.Name + labels[nnfv1alpha4.AllocationSetLabel] = allocationSet.Name nnfNodeBlockStorage.SetLabels(labels) expectedAllocations := node.Count @@ -350,7 +350,7 @@ func (r *NnfStorageReconciler) createNodeBlockStorage(ctx context.Context, nnfSt nnfNodeBlockStorage.Spec.SharedAllocation = allocationSet.SharedAllocation if len(nnfNodeBlockStorage.Spec.Allocations) == 0 { - nnfNodeBlockStorage.Spec.Allocations = make([]nnfv1alpha3.NnfNodeBlockStorageAllocationSpec, expectedAllocations) + nnfNodeBlockStorage.Spec.Allocations = make([]nnfv1alpha4.NnfNodeBlockStorageAllocationSpec, expectedAllocations) } if len(nnfNodeBlockStorage.Spec.Allocations) != expectedAllocations { @@ -400,15 +400,15 @@ func (r *NnfStorageReconciler) createNodeBlockStorage(ctx context.Context, nnfSt // Get the status from all the child NnfNodeBlockStorage resources and use them to build the status // for the NnfStorage. 
-func (r *NnfStorageReconciler) aggregateNodeBlockStorageStatus(ctx context.Context, nnfStorage *nnfv1alpha3.NnfStorage, allocationSetIndex int) (*ctrl.Result, error) { +func (r *NnfStorageReconciler) aggregateNodeBlockStorageStatus(ctx context.Context, nnfStorage *nnfv1alpha4.NnfStorage, allocationSetIndex int) (*ctrl.Result, error) { log := r.Log.WithValues("NnfStorage", types.NamespacedName{Name: nnfStorage.Name, Namespace: nnfStorage.Namespace}) allocationSet := &nnfStorage.Status.AllocationSets[allocationSetIndex] allocationSet.AllocationCount = 0 - nnfNodeBlockStorageList := &nnfv1alpha3.NnfNodeBlockStorageList{} + nnfNodeBlockStorageList := &nnfv1alpha4.NnfNodeBlockStorageList{} matchLabels := dwsv1alpha2.MatchingOwner(nnfStorage) - matchLabels[nnfv1alpha3.AllocationSetLabel] = nnfStorage.Spec.AllocationSets[allocationSetIndex].Name + matchLabels[nnfv1alpha4.AllocationSetLabel] = nnfStorage.Spec.AllocationSets[allocationSetIndex].Name listOptions := []client.ListOption{ matchLabels, @@ -426,7 +426,7 @@ func (r *NnfStorageReconciler) aggregateNodeBlockStorageStatus(ctx context.Conte // prune out any entries that aren't in the NnfStorage. This can happen if the NnfStorage was modified // after it was created, as is the case with NnfStorages from an NnfSystemStorage - nnfNodeBlockStorages := []nnfv1alpha3.NnfNodeBlockStorage{} + nnfNodeBlockStorages := []nnfv1alpha4.NnfNodeBlockStorage{} for _, nnfNodeBlockStorage := range nnfNodeBlockStorageList.Items { if _, exists := nodeNameMap[nnfNodeBlockStorage.GetNamespace()]; exists { nnfNodeBlockStorages = append(nnfNodeBlockStorages, nnfNodeBlockStorage) @@ -491,7 +491,7 @@ func (r *NnfStorageReconciler) aggregateNodeBlockStorageStatus(ctx context.Conte // Create an NnfNodeStorage if it doesn't exist, or update it if it requires updating. Each // Rabbit node gets an NnfNodeStorage, and there may be multiple allocations requested in it. // This limits the number of resources that have to be broadcast to the Rabbits. 
-func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *nnfv1alpha3.NnfStorage, allocationSetIndex int) (*ctrl.Result, error) { +func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *nnfv1alpha4.NnfStorage, allocationSetIndex int) (*ctrl.Result, error) { log := r.Log.WithValues("NnfStorage", types.NamespacedName{Name: storage.Name, Namespace: storage.Namespace}) if storage.Spec.FileSystemType == "lustre" { @@ -514,7 +514,7 @@ func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *n } if mgsNode != "" { - nnfNode := &nnfv1alpha3.NnfNode{ + nnfNode := &nnfv1alpha4.NnfNode{ ObjectMeta: metav1.ObjectMeta{ Name: "nnf-nlc", Namespace: mgsNode, @@ -535,12 +535,12 @@ func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *n // Create the NnfLustreMGT resource if this allocation set is for an MGT allocationSet := storage.Spec.AllocationSets[allocationSetIndex] if allocationSet.TargetType == "mgt" || allocationSet.TargetType == "mgtmdt" { - nnfLustreMgt := &nnfv1alpha3.NnfLustreMGT{ + nnfLustreMgt := &nnfv1alpha4.NnfLustreMGT{ ObjectMeta: metav1.ObjectMeta{ Name: storage.GetName(), Namespace: mgsNode, }, - Spec: nnfv1alpha3.NnfLustreMGTSpec{ + Spec: nnfv1alpha4.NnfLustreMGTSpec{ Addresses: []string{mgsAddress}, FsNameStart: "aaaaaaaa", }, @@ -576,7 +576,7 @@ func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *n startIndex := 0 for i, node := range allocationSet.Nodes { // Per Rabbit namespace. 
- nnfNodeStorage := &nnfv1alpha3.NnfNodeStorage{ + nnfNodeStorage := &nnfv1alpha4.NnfNodeStorage{ ObjectMeta: metav1.ObjectMeta{ Name: nnfNodeStorageName(storage, allocationSetIndex, i), Namespace: node.Name, @@ -589,13 +589,13 @@ func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *n dwsv1alpha2.AddOwnerLabels(nnfNodeStorage, storage) labels := nnfNodeStorage.GetLabels() - labels[nnfv1alpha3.AllocationSetLabel] = allocationSet.Name + labels[nnfv1alpha4.AllocationSetLabel] = allocationSet.Name nnfNodeStorage.SetLabels(labels) nnfNodeStorage.Spec.BlockReference = corev1.ObjectReference{ Name: nnfNodeStorageName(storage, allocationSetIndex, i), Namespace: node.Name, - Kind: reflect.TypeOf(nnfv1alpha3.NnfNodeBlockStorage{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha4.NnfNodeBlockStorage{}).Name(), } nnfNodeStorage.Spec.Capacity = allocationSet.Capacity nnfNodeStorage.Spec.UserID = storage.Spec.UserID @@ -642,12 +642,12 @@ func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *n // Get the status from all the child NnfNodeStorage resources and use them to build the status // for the NnfStorage. 
-func (r *NnfStorageReconciler) aggregateNodeStorageStatus(ctx context.Context, storage *nnfv1alpha3.NnfStorage, allocationSetIndex int, deleting bool) (*ctrl.Result, error) { +func (r *NnfStorageReconciler) aggregateNodeStorageStatus(ctx context.Context, storage *nnfv1alpha4.NnfStorage, allocationSetIndex int, deleting bool) (*ctrl.Result, error) { log := r.Log.WithValues("NnfStorage", types.NamespacedName{Name: storage.Name, Namespace: storage.Namespace}) - nnfNodeStorageList := &nnfv1alpha3.NnfNodeStorageList{} + nnfNodeStorageList := &nnfv1alpha4.NnfNodeStorageList{} matchLabels := dwsv1alpha2.MatchingOwner(storage) - matchLabels[nnfv1alpha3.AllocationSetLabel] = storage.Spec.AllocationSets[allocationSetIndex].Name + matchLabels[nnfv1alpha4.AllocationSetLabel] = storage.Spec.AllocationSets[allocationSetIndex].Name listOptions := []client.ListOption{ matchLabels, @@ -665,7 +665,7 @@ func (r *NnfStorageReconciler) aggregateNodeStorageStatus(ctx context.Context, s // prune out any entries that aren't in the NnfStorage. 
This can happen if the NnfStorage was modified // after it was created, as is the case with NnfStorages from an NnfSystemStorage - nnfNodeStorages := []nnfv1alpha3.NnfNodeStorage{} + nnfNodeStorages := []nnfv1alpha4.NnfNodeStorage{} for _, nnfNodeStorage := range nnfNodeStorageList.Items { if _, exists := nodeNameMap[nnfNodeStorage.GetNamespace()]; exists { nnfNodeStorages = append(nnfNodeStorages, nnfNodeStorage) @@ -726,9 +726,9 @@ func (r *NnfStorageReconciler) aggregateNodeStorageStatus(ctx context.Context, s return nil, nil } -func (r *NnfStorageReconciler) getLustreMgt(ctx context.Context, nnfStorage *nnfv1alpha3.NnfStorage) (*nnfv1alpha3.NnfLustreMGT, error) { +func (r *NnfStorageReconciler) getLustreMgt(ctx context.Context, nnfStorage *nnfv1alpha4.NnfStorage) (*nnfv1alpha4.NnfLustreMGT, error) { if nnfStorage.Status.LustreMgtReference != (corev1.ObjectReference{}) { - nnfLustreMgt := &nnfv1alpha3.NnfLustreMGT{ + nnfLustreMgt := &nnfv1alpha4.NnfLustreMGT{ ObjectMeta: metav1.ObjectMeta{ Name: nnfStorage.Status.LustreMgtReference.Name, Namespace: nnfStorage.Status.LustreMgtReference.Namespace, @@ -742,12 +742,12 @@ func (r *NnfStorageReconciler) getLustreMgt(ctx context.Context, nnfStorage *nnf return nnfLustreMgt, nil } - nnfLustreMgtList := &nnfv1alpha3.NnfLustreMGTList{} + nnfLustreMgtList := &nnfv1alpha4.NnfLustreMGTList{} if err := r.List(ctx, nnfLustreMgtList, []client.ListOption{}...); err != nil { return nil, dwsv1alpha2.NewResourceError("could not list NnfLustreMGTs").WithError(err).WithMajor() } - var nnfLustreMgt *nnfv1alpha3.NnfLustreMGT = nil + var nnfLustreMgt *nnfv1alpha4.NnfLustreMGT = nil for i := range nnfLustreMgtList.Items { if func(list []string, search string) bool { for _, element := range list { @@ -774,7 +774,7 @@ func (r *NnfStorageReconciler) getLustreMgt(ctx context.Context, nnfStorage *nnf return nnfLustreMgt, nil } -func (r *NnfStorageReconciler) getFsName(ctx context.Context, nnfStorage *nnfv1alpha3.NnfStorage) (string, error) { 
+func (r *NnfStorageReconciler) getFsName(ctx context.Context, nnfStorage *nnfv1alpha4.NnfStorage) (string, error) { nnfLustreMgt, err := r.getLustreMgt(ctx, nnfStorage) if err != nil { return "", dwsv1alpha2.NewResourceError("could not get NnfLustreMGT for address: %s", nnfStorage.Status.MgsAddress).WithError(err) @@ -785,7 +785,7 @@ func (r *NnfStorageReconciler) getFsName(ctx context.Context, nnfStorage *nnfv1a nnfStorage.Status.LustreMgtReference = corev1.ObjectReference{ Name: nnfLustreMgt.Name, Namespace: nnfLustreMgt.Namespace, - Kind: reflect.TypeOf(nnfv1alpha3.NnfLustreMGT{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha4.NnfLustreMGT{}).Name(), } // This will update the status section of the NnfStorage with the reference and requeue @@ -795,7 +795,7 @@ func (r *NnfStorageReconciler) getFsName(ctx context.Context, nnfStorage *nnfv1a reference := corev1.ObjectReference{ Name: nnfStorage.Name, Namespace: nnfStorage.Namespace, - Kind: reflect.TypeOf(nnfv1alpha3.NnfStorage{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha4.NnfStorage{}).Name(), } // Check the status section of the NnfLustreMGT to see if an fsname has been assigned yet @@ -826,7 +826,7 @@ func (r *NnfStorageReconciler) getFsName(ctx context.Context, nnfStorage *nnfv1a } -func (r *NnfStorageReconciler) setLustreOwnerGroup(ctx context.Context, nnfStorage *nnfv1alpha3.NnfStorage) (*ctrl.Result, error) { +func (r *NnfStorageReconciler) setLustreOwnerGroup(ctx context.Context, nnfStorage *nnfv1alpha4.NnfStorage) (*ctrl.Result, error) { log := r.Log.WithValues("NnfStorage", client.ObjectKeyFromObject(nnfStorage)) // Don't create the clientmount in the test environment. Some tests don't fake out the @@ -985,7 +985,7 @@ func (r *NnfStorageReconciler) setLustreOwnerGroup(ctx context.Context, nnfStora // Get the status from all the child NnfNodeStorage resources and use them to build the status // for the NnfStorage. 
-func (r *NnfStorageReconciler) aggregateClientMountStatus(ctx context.Context, storage *nnfv1alpha3.NnfStorage, deleting bool) error { +func (r *NnfStorageReconciler) aggregateClientMountStatus(ctx context.Context, storage *nnfv1alpha4.NnfStorage, deleting bool) error { clientMountList := &dwsv1alpha2.ClientMountList{} matchLabels := dwsv1alpha2.MatchingOwner(storage) @@ -1015,7 +1015,7 @@ func (r *NnfStorageReconciler) aggregateClientMountStatus(ctx context.Context, s // or the object references in the storage resource. We may have created children // that aren't in the cache and we may not have been able to add the object reference // to the NnfStorage. -func (r *NnfStorageReconciler) teardownStorage(ctx context.Context, storage *nnfv1alpha3.NnfStorage) (nodeStoragesState, error) { +func (r *NnfStorageReconciler) teardownStorage(ctx context.Context, storage *nnfv1alpha4.NnfStorage) (nodeStoragesState, error) { // Delete any clientmounts that were created by the NnfStorage. deleteStatus, err := dwsv1alpha2.DeleteChildren(ctx, r.Client, []dwsv1alpha2.ObjectList{&dwsv1alpha2.ClientMountList{}}, storage) if err != nil { @@ -1034,15 +1034,15 @@ func (r *NnfStorageReconciler) teardownStorage(ctx context.Context, storage *nnf // Delete the OSTs and MDTs first so we can drop the claim on the NnfLustreMgt resource. This will trigger // an lctl command to run to remove the fsname from the MGT. 
childObjects := []dwsv1alpha2.ObjectList{ - &nnfv1alpha3.NnfNodeStorageList{}, + &nnfv1alpha4.NnfNodeStorageList{}, } - ostDeleteStatus, err := dwsv1alpha2.DeleteChildrenWithLabels(ctx, r.Client, childObjects, storage, client.MatchingLabels{nnfv1alpha3.AllocationSetLabel: "ost"}) + ostDeleteStatus, err := dwsv1alpha2.DeleteChildrenWithLabels(ctx, r.Client, childObjects, storage, client.MatchingLabels{nnfv1alpha4.AllocationSetLabel: "ost"}) if err != nil { return nodeStoragesExist, err } - mdtDeleteStatus, err := dwsv1alpha2.DeleteChildrenWithLabels(ctx, r.Client, childObjects, storage, client.MatchingLabels{nnfv1alpha3.AllocationSetLabel: "mdt"}) + mdtDeleteStatus, err := dwsv1alpha2.DeleteChildrenWithLabels(ctx, r.Client, childObjects, storage, client.MatchingLabels{nnfv1alpha4.AllocationSetLabel: "mdt"}) if err != nil { return nodeStoragesExist, err } @@ -1106,7 +1106,7 @@ func (r *NnfStorageReconciler) teardownStorage(ctx context.Context, storage *nnf // releaseLustreMGT removes the claim from NnfLustreMGT and returns "true" once the NnfLustreMGT has removed // the entry from the status section, indicating that the fsname has been removed from the MGT -func (r *NnfStorageReconciler) releaseLustreMgt(ctx context.Context, storage *nnfv1alpha3.NnfStorage) (bool, error) { +func (r *NnfStorageReconciler) releaseLustreMgt(ctx context.Context, storage *nnfv1alpha4.NnfStorage) (bool, error) { if storage.Spec.FileSystemType != "lustre" { return true, nil } @@ -1115,7 +1115,7 @@ func (r *NnfStorageReconciler) releaseLustreMgt(ctx context.Context, storage *nn return true, nil } - nnfLustreMgt := &nnfv1alpha3.NnfLustreMGT{ + nnfLustreMgt := &nnfv1alpha4.NnfLustreMGT{ ObjectMeta: metav1.ObjectMeta{ Name: storage.Status.LustreMgtReference.Name, Namespace: storage.Status.LustreMgtReference.Namespace, @@ -1157,7 +1157,7 @@ func (r *NnfStorageReconciler) releaseLustreMgt(ctx context.Context, storage *nn // - NnfStorages from multiple namespaces create NnfNodeStorages in the same 
namespace // - Different allocations in an NnfStorage could be targeting the same Rabbit node (e.g., MGS and MDS on the same Rabbit) // - The same Rabbit node could be listed more than once within the same allocation. -func nnfNodeStorageName(storage *nnfv1alpha3.NnfStorage, allocationSetIndex int, i int) string { +func nnfNodeStorageName(storage *nnfv1alpha4.NnfStorage, allocationSetIndex int, i int) string { nodeName := storage.Spec.AllocationSets[allocationSetIndex].Nodes[i].Name // If the same Rabbit is listed more than once, the index on the end of the name needs to show @@ -1180,18 +1180,18 @@ func nnfNodeStorageName(storage *nnfv1alpha3.NnfStorage, allocationSetIndex int, func (r *NnfStorageReconciler) SetupWithManager(mgr ctrl.Manager) error { r.ChildObjects = []dwsv1alpha2.ObjectList{ &dwsv1alpha2.ClientMountList{}, - &nnfv1alpha3.NnfNodeStorageList{}, - &nnfv1alpha3.NnfNodeBlockStorageList{}, - &nnfv1alpha3.NnfLustreMGTList{}, - &nnfv1alpha3.NnfStorageProfileList{}, + &nnfv1alpha4.NnfNodeStorageList{}, + &nnfv1alpha4.NnfNodeBlockStorageList{}, + &nnfv1alpha4.NnfLustreMGTList{}, + &nnfv1alpha4.NnfStorageProfileList{}, } maxReconciles := runtime.GOMAXPROCS(0) return ctrl.NewControllerManagedBy(mgr). WithOptions(controller.Options{MaxConcurrentReconciles: maxReconciles}). - For(&nnfv1alpha3.NnfStorage{}). - Watches(&nnfv1alpha3.NnfNodeStorage{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). - Watches(&nnfv1alpha3.NnfNodeBlockStorage{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). + For(&nnfv1alpha4.NnfStorage{}). + Watches(&nnfv1alpha4.NnfNodeStorage{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). + Watches(&nnfv1alpha4.NnfNodeBlockStorage{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). Watches(&dwsv1alpha2.ClientMount{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). 
Complete(r) } diff --git a/internal/controller/nnf_systemconfiguration_controller.go b/internal/controller/nnf_systemconfiguration_controller.go index 566307ee..6adde026 100644 --- a/internal/controller/nnf_systemconfiguration_controller.go +++ b/internal/controller/nnf_systemconfiguration_controller.go @@ -38,7 +38,7 @@ import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/updater" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" ) @@ -220,12 +220,12 @@ func (r *NnfSystemConfigurationReconciler) labelsAndTaints(ctx context.Context, } taint := &corev1.Taint{ - Key: nnfv1alpha3.RabbitNodeTaintKey, + Key: nnfv1alpha4.RabbitNodeTaintKey, Value: "true", } staleLabel := false - _, hasCompletedLabel := labels[nnfv1alpha3.TaintsAndLabelsCompletedLabel] + _, hasCompletedLabel := labels[nnfv1alpha4.TaintsAndLabelsCompletedLabel] if effect == corev1.TaintEffectNoSchedule && hasCompletedLabel { // We're in pass 1. // The presence of the label means that the taint state has been @@ -251,7 +251,7 @@ func (r *NnfSystemConfigurationReconciler) labelsAndTaints(ctx context.Context, continue } // Clear the label and continue working on this node. - delete(labels, nnfv1alpha3.TaintsAndLabelsCompletedLabel) + delete(labels, nnfv1alpha4.TaintsAndLabelsCompletedLabel) node.SetLabels(labels) } else if hasCompletedLabel { // All other passes honor the label. @@ -267,7 +267,7 @@ func (r *NnfSystemConfigurationReconciler) labelsAndTaints(ctx context.Context, return false, err } // All passes completed on this node. 
- labels[nnfv1alpha3.TaintsAndLabelsCompletedLabel] = "true" + labels[nnfv1alpha4.TaintsAndLabelsCompletedLabel] = "true" doUpdate = true node.SetLabels(labels) } else { @@ -281,8 +281,8 @@ func (r *NnfSystemConfigurationReconciler) labelsAndTaints(ctx context.Context, } // Add the label. - if _, present := labels[nnfv1alpha3.RabbitNodeSelectorLabel]; !present { - labels[nnfv1alpha3.RabbitNodeSelectorLabel] = "true" + if _, present := labels[nnfv1alpha4.RabbitNodeSelectorLabel]; !present { + labels[nnfv1alpha4.RabbitNodeSelectorLabel] = "true" doUpdate = true node.SetLabels(labels) } diff --git a/internal/controller/nnf_systemconfiguration_controller_test.go b/internal/controller/nnf_systemconfiguration_controller_test.go index a9d2e886..61828007 100644 --- a/internal/controller/nnf_systemconfiguration_controller_test.go +++ b/internal/controller/nnf_systemconfiguration_controller_test.go @@ -31,7 +31,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" ) var _ = Describe("NnfSystemconfigurationController", func() { @@ -80,12 +80,12 @@ var _ = Describe("Adding taints and labels to nodes", func() { var sysCfg *dwsv1alpha2.SystemConfiguration taintNoSchedule := &corev1.Taint{ - Key: nnfv1alpha3.RabbitNodeTaintKey, + Key: nnfv1alpha4.RabbitNodeTaintKey, Value: "true", Effect: corev1.TaintEffectNoSchedule, } taintNoExecute := &corev1.Taint{ - Key: nnfv1alpha3.RabbitNodeTaintKey, + Key: nnfv1alpha4.RabbitNodeTaintKey, Value: "true", Effect: corev1.TaintEffectNoExecute, } @@ -147,8 +147,8 @@ var _ = Describe("Adding taints and labels to nodes", func() { Eventually(func(g Gomega) { g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(node), tnode)) labels := tnode.GetLabels() - g.Expect(labels).To(HaveKeyWithValue(nnfv1alpha3.RabbitNodeSelectorLabel, "true")) - 
g.Expect(labels).To(HaveKeyWithValue(nnfv1alpha3.TaintsAndLabelsCompletedLabel, "true")) + g.Expect(labels).To(HaveKeyWithValue(nnfv1alpha4.RabbitNodeSelectorLabel, "true")) + g.Expect(labels).To(HaveKeyWithValue(nnfv1alpha4.TaintsAndLabelsCompletedLabel, "true")) g.Expect(taints.TaintExists(tnode.Spec.Taints, taintNoSchedule)).To(BeTrue()) g.Expect(taints.TaintExists(tnode.Spec.Taints, taintNoExecute)).To(BeFalse()) }).Should(Succeed(), "verify failed for node %s", node.Name) @@ -167,7 +167,7 @@ var _ = Describe("Adding taints and labels to nodes", func() { // Remove the "cleared" label from node1. Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(node1), node1)) labels := node1.GetLabels() - delete(labels, nnfv1alpha3.TaintsAndLabelsCompletedLabel) + delete(labels, nnfv1alpha4.TaintsAndLabelsCompletedLabel) node1.SetLabels(labels) Expect(k8sClient.Update(context.TODO(), node1)).To(Succeed()) By("verifying node1 is repaired") @@ -227,8 +227,8 @@ var _ = Describe("Adding taints and labels to nodes", func() { tnode := &corev1.Node{} g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(node4), tnode)) labels := tnode.GetLabels() - g.Expect(labels).ToNot(HaveKey(nnfv1alpha3.RabbitNodeSelectorLabel)) - g.Expect(labels).ToNot(HaveKey(nnfv1alpha3.TaintsAndLabelsCompletedLabel)) + g.Expect(labels).ToNot(HaveKey(nnfv1alpha4.RabbitNodeSelectorLabel)) + g.Expect(labels).ToNot(HaveKey(nnfv1alpha4.TaintsAndLabelsCompletedLabel)) g.Expect(taints.TaintExists(tnode.Spec.Taints, taintNoSchedule)).To(BeFalse()) g.Expect(taints.TaintExists(tnode.Spec.Taints, taintNoExecute)).To(BeFalse()) }).Should(Succeed(), "verify failed for node %s", node4.Name) diff --git a/internal/controller/nnf_workflow_controller.go b/internal/controller/nnf_workflow_controller.go index f2da5c31..a1e31a9f 100644 --- a/internal/controller/nnf_workflow_controller.go +++ b/internal/controller/nnf_workflow_controller.go @@ -47,7 +47,7 @@ import ( 
"github.com/DataWorkflowServices/dws/utils/dwdparse" "github.com/DataWorkflowServices/dws/utils/updater" lusv1beta1 "github.com/NearNodeFlash/lustre-fs-operator/api/v1beta1" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" ) @@ -451,7 +451,7 @@ func (r *NnfWorkflowReconciler) finishSetupState(ctx context.Context, workflow * name, namespace := getStorageReferenceNameFromWorkflowActual(workflow, index) // Check whether the NnfStorage has finished creating the storage. - nnfStorage := &nnfv1alpha3.NnfStorage{ + nnfStorage := &nnfv1alpha4.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, @@ -501,7 +501,7 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo // Prepare the provided staging parameter for data-movement. Param is the source/destination value from the #DW copy_in/copy_out directive; based // on the param prefix we determine the storage instance and access requirements for data movement. 
- prepareStagingArgumentFn := func(param string) (*corev1.ObjectReference, *nnfv1alpha3.NnfAccess, *result, error) { + prepareStagingArgumentFn := func(param string) (*corev1.ObjectReference, *nnfv1alpha4.NnfAccess, *result, error) { var storageReference *corev1.ObjectReference name, _ := splitStagingArgumentIntoNameAndPath(param) @@ -532,7 +532,7 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo nnfStorageName = indexedResourceName(workflow, parentDwIndex) } - storage := &nnfv1alpha3.NnfStorage{ + storage := &nnfv1alpha4.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: nnfStorageName, Namespace: workflow.Namespace, @@ -544,7 +544,7 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo } storageReference = &corev1.ObjectReference{ - Kind: reflect.TypeOf(nnfv1alpha3.NnfStorage{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha4.NnfStorage{}).Name(), Name: storage.Name, Namespace: storage.Namespace, } @@ -609,7 +609,7 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo } // Wait for accesses to go ready - for _, access := range []*nnfv1alpha3.NnfAccess{sourceAccess, destAccess} { + for _, access := range []*nnfv1alpha4.NnfAccess{sourceAccess, destAccess} { if access != nil { if err := r.Get(ctx, client.ObjectKeyFromObject(access), access); err != nil { return nil, dwsv1alpha2.NewResourceError("could not get NnfAccess %v", client.ObjectKeyFromObject(access)).WithError(err).WithUserMessage("could not create data movement mount points") @@ -635,9 +635,9 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo } // Verify data movement is ready - dmm := &nnfv1alpha3.NnfDataMovementManager{ObjectMeta: metav1.ObjectMeta{ - Name: nnfv1alpha3.DataMovementManagerName, - Namespace: nnfv1alpha3.DataMovementNamespace, + dmm := &nnfv1alpha4.NnfDataMovementManager{ObjectMeta: metav1.ObjectMeta{ + Name: nnfv1alpha4.DataMovementManagerName, + Namespace: 
nnfv1alpha4.DataMovementNamespace, }} if err := r.Get(ctx, client.ObjectKeyFromObject(dmm), dmm); err != nil { return nil, dwsv1alpha2.NewResourceError("could not get NnfDataMovementManager %v", client.ObjectKeyFromObject(dmm)).WithError(err).WithUserMessage("could not determine data movement readiness") @@ -657,7 +657,7 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo targetStorageRef = sourceStorage } - targetStorage := &nnfv1alpha3.NnfStorage{ + targetStorage := &nnfv1alpha4.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: targetStorageRef.Name, Namespace: targetStorageRef.Namespace, @@ -678,7 +678,7 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo return nil, dwsv1alpha2.NewResourceError("could not get NnfDataMovementProfile %s", indexedResourceName(workflow, index)).WithError(err).WithUserMessage("could not find data movement profile") } dmProfileRef := corev1.ObjectReference{ - Kind: reflect.TypeOf(nnfv1alpha3.NnfDataMovementProfile{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha4.NnfDataMovementProfile{}).Name(), Name: dmProfile.Name, Namespace: dmProfile.Namespace, } @@ -697,17 +697,17 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo for _, node := range nodes { for i := 0; i < node.Count; i++ { - dm := &nnfv1alpha3.NnfDataMovement{ + dm := &nnfv1alpha4.NnfDataMovement{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%d", indexedResourceName(workflow, index), i), Namespace: node.Name, }, - Spec: nnfv1alpha3.NnfDataMovementSpec{ - Source: &nnfv1alpha3.NnfDataMovementSpecSourceDestination{ + Spec: nnfv1alpha4.NnfDataMovementSpec{ + Source: &nnfv1alpha4.NnfDataMovementSpecSourceDestination{ Path: getRabbitRelativePath(fsType, sourceStorage, sourceAccess, source, node.Name, i), StorageReference: *sourceStorage, }, - Destination: &nnfv1alpha3.NnfDataMovementSpecSourceDestination{ + Destination: &nnfv1alpha4.NnfDataMovementSpecSourceDestination{ Path: 
getRabbitRelativePath(fsType, destStorage, destAccess, dest, node.Name, i), StorageReference: *destStorage, }, @@ -719,8 +719,8 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo dwsv1alpha2.AddWorkflowLabels(dm, workflow) dwsv1alpha2.AddOwnerLabels(dm, workflow) - nnfv1alpha3.AddDataMovementTeardownStateLabel(dm, workflow.Status.State) - nnfv1alpha3.AddDataMovementInitiatorLabel(dm, dwArgs["command"]) + nnfv1alpha4.AddDataMovementTeardownStateLabel(dm, workflow.Status.State) + nnfv1alpha4.AddDataMovementInitiatorLabel(dm, dwArgs["command"]) addDirectiveIndexLabel(dm, index) log.Info("Creating NNF Data Movement", "name", client.ObjectKeyFromObject(dm).String()) @@ -735,17 +735,17 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo case "lustre": - dm := &nnfv1alpha3.NnfDataMovement{ + dm := &nnfv1alpha4.NnfDataMovement{ ObjectMeta: metav1.ObjectMeta{ Name: indexedResourceName(workflow, index), - Namespace: nnfv1alpha3.DataMovementNamespace, + Namespace: nnfv1alpha4.DataMovementNamespace, }, - Spec: nnfv1alpha3.NnfDataMovementSpec{ - Source: &nnfv1alpha3.NnfDataMovementSpecSourceDestination{ + Spec: nnfv1alpha4.NnfDataMovementSpec{ + Source: &nnfv1alpha4.NnfDataMovementSpecSourceDestination{ Path: getRabbitRelativePath(fsType, sourceStorage, sourceAccess, source, "", 0), StorageReference: *sourceStorage, }, - Destination: &nnfv1alpha3.NnfDataMovementSpecSourceDestination{ + Destination: &nnfv1alpha4.NnfDataMovementSpecSourceDestination{ Path: getRabbitRelativePath(fsType, destStorage, destAccess, dest, "", 0), StorageReference: *destStorage, }, @@ -757,8 +757,8 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo dwsv1alpha2.AddWorkflowLabels(dm, workflow) dwsv1alpha2.AddOwnerLabels(dm, workflow) - nnfv1alpha3.AddDataMovementTeardownStateLabel(dm, workflow.Status.State) - nnfv1alpha3.AddDataMovementInitiatorLabel(dm, dwArgs["command"]) + 
nnfv1alpha4.AddDataMovementTeardownStateLabel(dm, workflow.Status.State) + nnfv1alpha4.AddDataMovementInitiatorLabel(dm, dwArgs["command"]) addDirectiveIndexLabel(dm, index) log.Info("Creating NNF Data Movement", "name", client.ObjectKeyFromObject(dm).String()) @@ -778,10 +778,10 @@ func (r *NnfWorkflowReconciler) finishDataInOutState(ctx context.Context, workfl // Wait for data movement resources to complete matchingLabels := dwsv1alpha2.MatchingOwner(workflow) - matchingLabels[nnfv1alpha3.DirectiveIndexLabel] = strconv.Itoa(index) - matchingLabels[nnfv1alpha3.DataMovementTeardownStateLabel] = string(workflow.Status.State) + matchingLabels[nnfv1alpha4.DirectiveIndexLabel] = strconv.Itoa(index) + matchingLabels[nnfv1alpha4.DataMovementTeardownStateLabel] = string(workflow.Status.State) - dataMovementList := &nnfv1alpha3.NnfDataMovementList{} + dataMovementList := &nnfv1alpha4.NnfDataMovementList{} if err := r.List(ctx, dataMovementList, matchingLabels); err != nil { return nil, dwsv1alpha2.NewResourceError("could not list NnfDataMovements with labels: %v", matchingLabels).WithError(err).WithUserMessage("could not find data movement information") } @@ -793,7 +793,7 @@ func (r *NnfWorkflowReconciler) finishDataInOutState(ctx context.Context, workfl } for _, dm := range dataMovementList.Items { - if dm.Status.State != nnfv1alpha3.DataMovementConditionTypeFinished { + if dm.Status.State != nnfv1alpha4.DataMovementConditionTypeFinished { return Requeue("pending data movement").withObject(&dm), nil } } @@ -801,7 +801,7 @@ func (r *NnfWorkflowReconciler) finishDataInOutState(ctx context.Context, workfl // Check results of data movement operations // TODO: Detailed Fail Message? 
for _, dm := range dataMovementList.Items { - if dm.Status.Status != nnfv1alpha3.DataMovementConditionReasonSuccess { + if dm.Status.Status != nnfv1alpha4.DataMovementConditionReasonSuccess { handleWorkflowErrorByIndex(dwsv1alpha2.NewResourceError("").WithUserMessage( fmt.Sprintf("data movement operation failed during '%s', message: %s", workflow.Status.State, dm.Status.Message)). WithFatal(), workflow, index) @@ -848,7 +848,7 @@ func (r *NnfWorkflowReconciler) startPreRunState(ctx context.Context, workflow * return nil, dwsv1alpha2.NewResourceError("could not find pinned NnfStorageProfile: %v", types.NamespacedName{Name: pinnedName, Namespace: pinnedNamespace}).WithError(err).WithFatal() } - access := &nnfv1alpha3.NnfAccess{ + access := &nnfv1alpha4.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: indexedResourceName(workflow, index) + "-computes", Namespace: workflow.Namespace, @@ -884,7 +884,7 @@ func (r *NnfWorkflowReconciler) startPreRunState(ctx context.Context, workflow * // which shares the same name with the NNFStorage. 
Name: name, Namespace: namespace, - Kind: reflect.TypeOf(nnfv1alpha3.NnfStorage{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha4.NnfStorage{}).Name(), } return ctrl.SetControllerReference(workflow, access, r.Scheme) @@ -914,7 +914,7 @@ func (r *NnfWorkflowReconciler) startPreRunState(ctx context.Context, workflow * if fsType == "gfs2" || fsType == "lustre" { name, namespace := getStorageReferenceNameFromWorkflowActual(workflow, index) - storage := &nnfv1alpha3.NnfStorage{ + storage := &nnfv1alpha4.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, @@ -993,15 +993,15 @@ func (r *NnfWorkflowReconciler) startPostRunState(ctx context.Context, workflow // Wait for data movement resources to complete matchingLabels := dwsv1alpha2.MatchingOwner(workflow) - matchingLabels[nnfv1alpha3.DataMovementTeardownStateLabel] = string(dwsv1alpha2.StatePostRun) + matchingLabels[nnfv1alpha4.DataMovementTeardownStateLabel] = string(dwsv1alpha2.StatePostRun) - dataMovementList := &nnfv1alpha3.NnfDataMovementList{} + dataMovementList := &nnfv1alpha4.NnfDataMovementList{} if err := r.List(ctx, dataMovementList, matchingLabels); err != nil { return nil, dwsv1alpha2.NewResourceError("could not list NnfDataMovements with labels: %v", matchingLabels).WithError(err).WithUserMessage("could not find data movement information") } for _, dm := range dataMovementList.Items { - if dm.Status.State != nnfv1alpha3.DataMovementConditionTypeFinished { + if dm.Status.State != nnfv1alpha4.DataMovementConditionTypeFinished { return Requeue("pending data movement").withObject(&dm), nil } } @@ -1038,19 +1038,19 @@ func (r *NnfWorkflowReconciler) finishPostRunState(ctx context.Context, workflow // Any user created copy-offload data movement requests created during run must report any errors to the workflow. 
// TODO: Customer asked if this could be optional matchingLabels := dwsv1alpha2.MatchingOwner(workflow) - matchingLabels[nnfv1alpha3.DataMovementTeardownStateLabel] = string(dwsv1alpha2.StatePostRun) + matchingLabels[nnfv1alpha4.DataMovementTeardownStateLabel] = string(dwsv1alpha2.StatePostRun) - dataMovementList := &nnfv1alpha3.NnfDataMovementList{} + dataMovementList := &nnfv1alpha4.NnfDataMovementList{} if err := r.List(ctx, dataMovementList, matchingLabels); err != nil { return nil, dwsv1alpha2.NewResourceError("could not list NnfDataMovements with labels: %v", matchingLabels).WithError(err).WithUserMessage("could not find data movement information") } for _, dm := range dataMovementList.Items { - if dm.Status.State != nnfv1alpha3.DataMovementConditionTypeFinished { + if dm.Status.State != nnfv1alpha4.DataMovementConditionTypeFinished { return Requeue("pending data movement").withObject(&dm), nil } - if dm.Status.Status == nnfv1alpha3.DataMovementConditionReasonFailed { + if dm.Status.Status == nnfv1alpha4.DataMovementConditionReasonFailed { handleWorkflowErrorByIndex(dwsv1alpha2.NewResourceError("data movement %v failed", client.ObjectKeyFromObject(&dm)).WithUserMessage("data movement failed").WithFatal(), workflow, index) return Requeue("error").withObject(&dm), nil } @@ -1073,11 +1073,11 @@ func (r *NnfWorkflowReconciler) startTeardownState(ctx context.Context, workflow // copy_in/out directives can reference NnfStorage from a different directive, so all the NnfAccesses // need to be removed first. 
childObjects := []dwsv1alpha2.ObjectList{ - &nnfv1alpha3.NnfDataMovementList{}, - &nnfv1alpha3.NnfAccessList{}, + &nnfv1alpha4.NnfDataMovementList{}, + &nnfv1alpha4.NnfAccessList{}, } - deleteStatus, err := dwsv1alpha2.DeleteChildrenWithLabels(ctx, r.Client, childObjects, workflow, client.MatchingLabels{nnfv1alpha3.DirectiveIndexLabel: strconv.Itoa(index)}) + deleteStatus, err := dwsv1alpha2.DeleteChildrenWithLabels(ctx, r.Client, childObjects, workflow, client.MatchingLabels{nnfv1alpha4.DirectiveIndexLabel: strconv.Itoa(index)}) if err != nil { return nil, dwsv1alpha2.NewResourceError("could not delete NnfDataMovement and NnfAccess children").WithError(err).WithUserMessage("could not stop data movement and unmount file systems") } @@ -1114,7 +1114,7 @@ func (r *NnfWorkflowReconciler) finishTeardownState(ctx context.Context, workflo persistentStorage.SetOwnerReferences([]metav1.OwnerReference{}) dwsv1alpha2.RemoveOwnerLabels(persistentStorage) labels := persistentStorage.GetLabels() - delete(labels, nnfv1alpha3.DirectiveIndexLabel) + delete(labels, nnfv1alpha4.DirectiveIndexLabel) persistentStorage.SetLabels(labels) err = r.Update(ctx, persistentStorage) @@ -1200,11 +1200,11 @@ func (r *NnfWorkflowReconciler) finishTeardownState(ctx context.Context, workflo } childObjects := []dwsv1alpha2.ObjectList{ - &nnfv1alpha3.NnfStorageList{}, + &nnfv1alpha4.NnfStorageList{}, &dwsv1alpha2.PersistentStorageInstanceList{}, } - deleteStatus, err := dwsv1alpha2.DeleteChildrenWithLabels(ctx, r.Client, childObjects, workflow, client.MatchingLabels{nnfv1alpha3.DirectiveIndexLabel: strconv.Itoa(index)}) + deleteStatus, err := dwsv1alpha2.DeleteChildrenWithLabels(ctx, r.Client, childObjects, workflow, client.MatchingLabels{nnfv1alpha4.DirectiveIndexLabel: strconv.Itoa(index)}) if err != nil { return nil, dwsv1alpha2.NewResourceError("could not delete NnfStorage and PersistentStorageInstance children").WithError(err).WithUserMessage("could not delete storage allocations") } @@ -1219,9 
+1219,9 @@ func (r *NnfWorkflowReconciler) finishTeardownState(ctx context.Context, workflo // SetupWithManager sets up the controller with the Manager. func (r *NnfWorkflowReconciler) SetupWithManager(mgr ctrl.Manager) error { r.ChildObjects = []dwsv1alpha2.ObjectList{ - &nnfv1alpha3.NnfDataMovementList{}, - &nnfv1alpha3.NnfAccessList{}, - &nnfv1alpha3.NnfStorageList{}, + &nnfv1alpha4.NnfDataMovementList{}, + &nnfv1alpha4.NnfAccessList{}, + &nnfv1alpha4.NnfStorageList{}, &dwsv1alpha2.PersistentStorageInstanceList{}, &dwsv1alpha2.DirectiveBreakdownList{}, } @@ -1230,10 +1230,10 @@ func (r *NnfWorkflowReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). WithOptions(controller.Options{MaxConcurrentReconciles: maxReconciles}). For(&dwsv1alpha2.Workflow{}). - Owns(&nnfv1alpha3.NnfAccess{}). + Owns(&nnfv1alpha4.NnfAccess{}). Owns(&dwsv1alpha2.DirectiveBreakdown{}). Owns(&dwsv1alpha2.PersistentStorageInstance{}). - Watches(&nnfv1alpha3.NnfDataMovement{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). - Watches(&nnfv1alpha3.NnfStorage{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). + Watches(&nnfv1alpha4.NnfDataMovement{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). + Watches(&nnfv1alpha4.NnfStorage{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). 
Complete(r) } diff --git a/internal/controller/nnf_workflow_controller_container_helpers.go b/internal/controller/nnf_workflow_controller_container_helpers.go index ec63d961..a0579dcb 100644 --- a/internal/controller/nnf_workflow_controller_container_helpers.go +++ b/internal/controller/nnf_workflow_controller_container_helpers.go @@ -26,7 +26,7 @@ import ( "strings" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" "github.com/go-logr/logr" mpicommonv1 "github.com/kubeflow/common/pkg/apis/common/v1" mpiv2beta1 "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1" @@ -42,7 +42,7 @@ import ( type nnfUserContainer struct { workflow *dwsv1alpha2.Workflow - profile *nnfv1alpha3.NnfContainerProfile + profile *nnfv1alpha4.NnfContainerProfile nnfNodes []string volumes []nnfContainerVolume username string @@ -77,7 +77,7 @@ func (c *nnfUserContainer) createMPIJob() error { } c.profile.Data.MPISpec.DeepCopyInto(&mpiJob.Spec) - c.username = nnfv1alpha3.ContainerMPIUser + c.username = nnfv1alpha4.ContainerMPIUser if err := c.applyLabels(&mpiJob.ObjectMeta); err != nil { return err @@ -250,10 +250,10 @@ func (c *nnfUserContainer) applyLabels(job metav1.Object) error { dwsv1alpha2.AddWorkflowLabels(job, c.workflow) labels := job.GetLabels() - labels[nnfv1alpha3.ContainerLabel] = c.workflow.Name - labels[nnfv1alpha3.PinnedContainerProfileLabelName] = c.profile.GetName() - labels[nnfv1alpha3.PinnedContainerProfileLabelNameSpace] = c.profile.GetNamespace() - labels[nnfv1alpha3.DirectiveIndexLabel] = strconv.Itoa(c.index) + labels[nnfv1alpha4.ContainerLabel] = c.workflow.Name + labels[nnfv1alpha4.PinnedContainerProfileLabelName] = c.profile.GetName() + labels[nnfv1alpha4.PinnedContainerProfileLabelNameSpace] = c.profile.GetNamespace() + labels[nnfv1alpha4.DirectiveIndexLabel] = strconv.Itoa(c.index) job.SetLabels(labels) if err := 
ctrl.SetControllerReference(c.workflow, job, c.scheme); err != nil { @@ -266,7 +266,7 @@ func (c *nnfUserContainer) applyLabels(job metav1.Object) error { func (c *nnfUserContainer) applyTolerations(spec *corev1.PodSpec) { spec.Tolerations = append(spec.Tolerations, corev1.Toleration{ Effect: corev1.TaintEffectNoSchedule, - Key: nnfv1alpha3.RabbitNodeTaintKey, + Key: nnfv1alpha4.RabbitNodeTaintKey, Operator: corev1.TolerationOpEqual, Value: "true", }) @@ -440,7 +440,7 @@ func (c *nnfUserContainer) getHostPorts() ([]uint16, error) { // Get the ports from the port manager for this workflow for _, alloc := range pm.Status.Allocations { - if alloc.Requester != nil && alloc.Requester.UID == c.workflow.UID && alloc.Status == nnfv1alpha3.NnfPortManagerAllocationStatusInUse { + if alloc.Requester != nil && alloc.Requester.UID == c.workflow.UID && alloc.Status == nnfv1alpha4.NnfPortManagerAllocationStatusInUse { ports = append(ports, alloc.Ports...) } } diff --git a/internal/controller/nnf_workflow_controller_helpers.go b/internal/controller/nnf_workflow_controller_helpers.go index cbaa96d2..de045059 100644 --- a/internal/controller/nnf_workflow_controller_helpers.go +++ b/internal/controller/nnf_workflow_controller_helpers.go @@ -34,7 +34,7 @@ import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/dwdparse" lusv1beta1 "github.com/NearNodeFlash/lustre-fs-operator/api/v1beta1" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" "github.com/go-logr/logr" mpiv2beta1 "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1" @@ -554,8 +554,8 @@ func (r *NnfWorkflowReconciler) validateServerAllocations(ctx context.Context, d } -func (r *NnfWorkflowReconciler) createNnfStorage(ctx context.Context, workflow *dwsv1alpha2.Workflow, s *dwsv1alpha2.Servers, index int, log logr.Logger) (*nnfv1alpha3.NnfStorage, error) { - nnfStorage := 
&nnfv1alpha3.NnfStorage{ +func (r *NnfWorkflowReconciler) createNnfStorage(ctx context.Context, workflow *dwsv1alpha2.Workflow, s *dwsv1alpha2.Servers, index int, log logr.Logger) (*nnfv1alpha4.NnfStorage, error) { + nnfStorage := &nnfv1alpha4.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: s.Name, Namespace: s.Namespace, @@ -644,11 +644,11 @@ func (r *NnfWorkflowReconciler) createNnfStorage(ctx context.Context, workflow * } // Need to remove all of the AllocationSets in the NnfStorage object before we begin - nnfStorage.Spec.AllocationSets = []nnfv1alpha3.NnfStorageAllocationSetSpec{} + nnfStorage.Spec.AllocationSets = []nnfv1alpha4.NnfStorageAllocationSetSpec{} // Iterate the Servers data elements to pull out the allocation sets for the server for i := range s.Spec.AllocationSets { - nnfAllocSet := nnfv1alpha3.NnfStorageAllocationSetSpec{} + nnfAllocSet := nnfv1alpha4.NnfStorageAllocationSetSpec{} nnfAllocSet.Name = s.Spec.AllocationSets[i].Label nnfAllocSet.Capacity = s.Spec.AllocationSets[i].AllocationSize @@ -668,12 +668,12 @@ func (r *NnfWorkflowReconciler) createNnfStorage(ctx context.Context, workflow * // If there are multiple allocations on the first MGTMDT node, split it out into two seperate // node entries. The first is a single allocation that will be used for the MGTMDT. The remaining // allocations on the node will be MDTs only. 
- node := nnfv1alpha3.NnfStorageAllocationNodes{Name: storage.Name, Count: 1} + node := nnfv1alpha4.NnfStorageAllocationNodes{Name: storage.Name, Count: 1} nnfAllocSet.Nodes = append(nnfAllocSet.Nodes, node) - node = nnfv1alpha3.NnfStorageAllocationNodes{Name: storage.Name, Count: storage.AllocationCount - 1} + node = nnfv1alpha4.NnfStorageAllocationNodes{Name: storage.Name, Count: storage.AllocationCount - 1} nnfAllocSet.Nodes = append(nnfAllocSet.Nodes, node) } else { - node := nnfv1alpha3.NnfStorageAllocationNodes{Name: storage.Name, Count: storage.AllocationCount} + node := nnfv1alpha4.NnfStorageAllocationNodes{Name: storage.Name, Count: storage.AllocationCount} nnfAllocSet.Nodes = append(nnfAllocSet.Nodes, node) } } @@ -703,7 +703,7 @@ func (r *NnfWorkflowReconciler) createNnfStorage(ctx context.Context, workflow * func (r *NnfWorkflowReconciler) getLustreMgsFromPool(ctx context.Context, pool string) (corev1.ObjectReference, string, error) { persistentStorageList := &dwsv1alpha2.PersistentStorageInstanceList{} - if err := r.List(ctx, persistentStorageList, client.MatchingLabels(map[string]string{nnfv1alpha3.StandaloneMGTLabel: pool})); err != nil { + if err := r.List(ctx, persistentStorageList, client.MatchingLabels(map[string]string{nnfv1alpha4.StandaloneMGTLabel: pool})); err != nil { return corev1.ObjectReference{}, "", err } @@ -715,7 +715,7 @@ func (r *NnfWorkflowReconciler) getLustreMgsFromPool(ctx context.Context, pool s healthyMgts := make(map[string]corev1.ObjectReference) for _, persistentStorage := range persistentStorageList.Items { // Find the NnfStorage for the PersistentStorage so we can check its status and get the MGT LNid - nnfStorage := &nnfv1alpha3.NnfStorage{ + nnfStorage := &nnfv1alpha4.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: persistentStorage.Name, Namespace: persistentStorage.Namespace, @@ -810,14 +810,14 @@ func (r *NnfWorkflowReconciler) findLustreFileSystemForPath(ctx context.Context, return nil } -func (r 
*NnfWorkflowReconciler) setupNnfAccessForServers(ctx context.Context, storage *nnfv1alpha3.NnfStorage, workflow *dwsv1alpha2.Workflow, index int, parentDwIndex int, teardownState dwsv1alpha2.WorkflowState, log logr.Logger) (*nnfv1alpha3.NnfAccess, error) { +func (r *NnfWorkflowReconciler) setupNnfAccessForServers(ctx context.Context, storage *nnfv1alpha4.NnfStorage, workflow *dwsv1alpha2.Workflow, index int, parentDwIndex int, teardownState dwsv1alpha2.WorkflowState, log logr.Logger) (*nnfv1alpha4.NnfAccess, error) { pinnedName, pinnedNamespace := getStorageReferenceNameFromWorkflowActual(workflow, parentDwIndex) nnfStorageProfile, err := findPinnedProfile(ctx, r.Client, pinnedNamespace, pinnedName) if err != nil { return nil, dwsv1alpha2.NewResourceError("could not find pinned NnfStorageProfile: %v", types.NamespacedName{Name: pinnedName, Namespace: pinnedNamespace}).WithError(err).WithFatal() } - access := &nnfv1alpha3.NnfAccess{ + access := &nnfv1alpha4.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: indexedResourceName(workflow, parentDwIndex) + "-servers", Namespace: workflow.Namespace, @@ -830,9 +830,9 @@ func (r *NnfWorkflowReconciler) setupNnfAccessForServers(ctx context.Context, st dwsv1alpha2.AddOwnerLabels(access, workflow) addPinnedStorageProfileLabel(access, nnfStorageProfile) addDirectiveIndexLabel(access, index) - nnfv1alpha3.AddDataMovementTeardownStateLabel(access, teardownState) + nnfv1alpha4.AddDataMovementTeardownStateLabel(access, teardownState) - access.Spec = nnfv1alpha3.NnfAccessSpec{ + access.Spec = nnfv1alpha4.NnfAccessSpec{ DesiredState: "mounted", TeardownState: teardownState, Target: "all", @@ -844,7 +844,7 @@ func (r *NnfWorkflowReconciler) setupNnfAccessForServers(ctx context.Context, st // NNF Storage is Namespaced Name to the servers object StorageReference: corev1.ObjectReference{ - Kind: reflect.TypeOf(nnfv1alpha3.NnfStorage{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha4.NnfStorage{}).Name(), Name: storage.Name, Namespace: 
storage.Namespace, }, @@ -873,7 +873,7 @@ func (r *NnfWorkflowReconciler) getDirectiveFileSystemType(ctx context.Context, return dwArgs["type"], nil case "persistentdw": name, namespace := getStorageReferenceNameFromWorkflowActual(workflow, index) - nnfStorage := &nnfv1alpha3.NnfStorage{ + nnfStorage := &nnfv1alpha4.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, @@ -1046,10 +1046,10 @@ func splitStagingArgumentIntoNameAndPath(arg string) (string, string) { return name, path } -func getRabbitRelativePath(fsType string, storageRef *corev1.ObjectReference, access *nnfv1alpha3.NnfAccess, path, namespace string, index int) string { +func getRabbitRelativePath(fsType string, storageRef *corev1.ObjectReference, access *nnfv1alpha4.NnfAccess, path, namespace string, index int) string { relPath := path - if storageRef.Kind == reflect.TypeOf(nnfv1alpha3.NnfStorage{}).Name() { + if storageRef.Kind == reflect.TypeOf(nnfv1alpha4.NnfStorage{}).Name() { switch fsType { case "xfs", "gfs2": idxMount := getIndexMountDir(namespace, index) @@ -1115,7 +1115,7 @@ func addDirectiveIndexLabel(object metav1.Object, index int) { labels = make(map[string]string) } - labels[nnfv1alpha3.DirectiveIndexLabel] = strconv.Itoa(index) + labels[nnfv1alpha4.DirectiveIndexLabel] = strconv.Itoa(index) object.SetLabels(labels) } @@ -1125,7 +1125,7 @@ func getDirectiveIndexLabel(object metav1.Object) string { return "" } - return labels[nnfv1alpha3.DirectiveIndexLabel] + return labels[nnfv1alpha4.DirectiveIndexLabel] } func setTargetOwnerUIDLabel(object metav1.Object, value string) { @@ -1134,7 +1134,7 @@ func setTargetOwnerUIDLabel(object metav1.Object, value string) { labels = make(map[string]string) } - labels[nnfv1alpha3.TargetOwnerUidLabel] = value + labels[nnfv1alpha4.TargetOwnerUidLabel] = value object.SetLabels(labels) } @@ -1144,7 +1144,7 @@ func getTargetOwnerUIDLabel(object metav1.Object) string { return "" } - return labels[nnfv1alpha3.TargetOwnerUidLabel] + 
return labels[nnfv1alpha4.TargetOwnerUidLabel] } func setTargetDirectiveIndexLabel(object metav1.Object, value string) { @@ -1153,7 +1153,7 @@ func setTargetDirectiveIndexLabel(object metav1.Object, value string) { labels = make(map[string]string) } - labels[nnfv1alpha3.TargetDirectiveIndexLabel] = value + labels[nnfv1alpha4.TargetDirectiveIndexLabel] = value object.SetLabels(labels) } @@ -1163,7 +1163,7 @@ func getTargetDirectiveIndexLabel(object metav1.Object) string { return "" } - return labels[nnfv1alpha3.TargetDirectiveIndexLabel] + return labels[nnfv1alpha4.TargetDirectiveIndexLabel] } func (r *NnfWorkflowReconciler) unmountNnfAccessIfNecessary(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int, accessSuffix string) (*result, error) { @@ -1171,7 +1171,7 @@ func (r *NnfWorkflowReconciler) unmountNnfAccessIfNecessary(ctx context.Context, panic(fmt.Sprint("unhandled NnfAccess suffix", accessSuffix)) } - access := &nnfv1alpha3.NnfAccess{ + access := &nnfv1alpha4.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: indexedResourceName(workflow, index) + "-" + accessSuffix, Namespace: workflow.Namespace, @@ -1185,7 +1185,7 @@ func (r *NnfWorkflowReconciler) unmountNnfAccessIfNecessary(ctx context.Context, return nil, client.IgnoreNotFound(err) } - teardownState, found := access.Labels[nnfv1alpha3.DataMovementTeardownStateLabel] + teardownState, found := access.Labels[nnfv1alpha4.DataMovementTeardownStateLabel] if !found || dwsv1alpha2.WorkflowState(teardownState) == workflow.Status.State { if access.Spec.DesiredState != "unmounted" { access.Spec.DesiredState = "unmounted" @@ -1224,7 +1224,7 @@ func (r *NnfWorkflowReconciler) waitForNnfAccessStateAndReady(ctx context.Contex for _, suffix := range accessSuffixes { - access := &nnfv1alpha3.NnfAccess{ + access := &nnfv1alpha4.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: indexedResourceName(workflow, index) + suffix, Namespace: workflow.Namespace, @@ -1249,7 +1249,7 @@ func (r *NnfWorkflowReconciler) 
waitForNnfAccessStateAndReady(ctx context.Contex } else { // When unmounting, we are conditionally dependent on the workflow state matching the // state of the teardown label, if found. - teardownState, found := access.Labels[nnfv1alpha3.DataMovementTeardownStateLabel] + teardownState, found := access.Labels[nnfv1alpha4.DataMovementTeardownStateLabel] if !found || dwsv1alpha2.WorkflowState(teardownState) == workflow.Status.State { if access.Status.State != "unmounted" || !access.Status.Ready { return Requeue("pending unmount").withObject(access), nil @@ -1363,7 +1363,7 @@ func (r *NnfWorkflowReconciler) userContainerHandler(ctx context.Context, workfl profile: profile, nnfNodes: nnfNodes, volumes: volumes, - username: nnfv1alpha3.ContainerUser, + username: nnfv1alpha4.ContainerUser, uid: int64(workflow.Spec.UserID), gid: int64(workflow.Spec.GroupID), index: index, @@ -1400,7 +1400,7 @@ func (r *NnfWorkflowReconciler) createContainerService(ctx context.Context, work } service.Spec.Selector = map[string]string{ - nnfv1alpha3.ContainerLabel: workflow.Name, + nnfv1alpha4.ContainerLabel: workflow.Name, } service.Spec.ClusterIP = corev1.ClusterIPNone @@ -1611,7 +1611,7 @@ func (r *NnfWorkflowReconciler) deleteContainers(ctx context.Context, workflow * // Add workflow matchLabels + directive index (if desired) matchLabels := dwsv1alpha2.MatchingWorkflow(workflow) if index >= 0 { - matchLabels[nnfv1alpha3.DirectiveIndexLabel] = strconv.Itoa(index) + matchLabels[nnfv1alpha4.DirectiveIndexLabel] = strconv.Itoa(index) } // Delete MPIJobs @@ -1879,7 +1879,7 @@ func (r *NnfWorkflowReconciler) getMPIJobs(ctx context.Context, workflow *dwsv1a // Get the MPIJobs for this workflow and directive index matchLabels := dwsv1alpha2.MatchingWorkflow(workflow) if index >= 0 { - matchLabels[nnfv1alpha3.DirectiveIndexLabel] = strconv.Itoa(index) + matchLabels[nnfv1alpha4.DirectiveIndexLabel] = strconv.Itoa(index) } jobList := &mpiv2beta1.MPIJobList{} @@ -1894,7 +1894,7 @@ func (r 
*NnfWorkflowReconciler) getContainerJobs(ctx context.Context, workflow * // Get the jobs for this workflow and directive index matchLabels := dwsv1alpha2.MatchingWorkflow(workflow) if index >= 0 { - matchLabels[nnfv1alpha3.DirectiveIndexLabel] = strconv.Itoa(index) + matchLabels[nnfv1alpha4.DirectiveIndexLabel] = strconv.Itoa(index) } jobList := &batchv1.JobList{} @@ -1906,7 +1906,7 @@ func (r *NnfWorkflowReconciler) getContainerJobs(ctx context.Context, workflow * } // Create a list of volumes to be mounted inside of the containers based on the DW_JOB/DW_PERSISTENT arguments -func (r *NnfWorkflowReconciler) getContainerVolumes(ctx context.Context, workflow *dwsv1alpha2.Workflow, dwArgs map[string]string, profile *nnfv1alpha3.NnfContainerProfile) ([]nnfContainerVolume, *result, error) { +func (r *NnfWorkflowReconciler) getContainerVolumes(ctx context.Context, workflow *dwsv1alpha2.Workflow, dwArgs map[string]string, profile *nnfv1alpha4.NnfContainerProfile) ([]nnfContainerVolume, *result, error) { volumes := []nnfContainerVolume{} for arg, val := range dwArgs { @@ -1972,7 +1972,7 @@ func (r *NnfWorkflowReconciler) getContainerVolumes(ctx context.Context, workflo return nil, nil, dwsv1alpha2.NewResourceError("could not retrieve the directive breakdown for '%s'", vol.directiveName).WithMajor() } - nnfAccess := &nnfv1alpha3.NnfAccess{ + nnfAccess := &nnfv1alpha4.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: workflow.Name + "-" + strconv.Itoa(vol.directiveIndex) + "-servers", Namespace: workflow.Namespace, @@ -2017,7 +2017,7 @@ func (r *NnfWorkflowReconciler) getContainerPorts(ctx context.Context, workflow // Add a port allocation request to the manager for the number of ports specified by the // container profile - pm.Spec.Allocations = append(pm.Spec.Allocations, nnfv1alpha3.NnfPortManagerAllocationSpec{ + pm.Spec.Allocations = append(pm.Spec.Allocations, nnfv1alpha4.NnfPortManagerAllocationSpec{ Requester: corev1.ObjectReference{ Name: workflow.Name, Namespace: 
workflow.Namespace, @@ -2058,14 +2058,14 @@ func (r *NnfWorkflowReconciler) checkContainerPorts(ctx context.Context, workflo for _, alloc := range pm.Status.Allocations { if alloc.Requester != nil && alloc.Requester.UID == workflow.UID { - if alloc.Status == nnfv1alpha3.NnfPortManagerAllocationStatusInUse && len(alloc.Ports) == int(profile.Data.NumPorts) { + if alloc.Status == nnfv1alpha4.NnfPortManagerAllocationStatusInUse && len(alloc.Ports) == int(profile.Data.NumPorts) { // Add workflow env var for the ports name, val := getContainerPortsEnvVar(alloc.Ports) workflow.Status.Env[name] = val return nil, nil // done - } else if alloc.Status == nnfv1alpha3.NnfPortManagerAllocationStatusInvalidConfiguration { + } else if alloc.Status == nnfv1alpha4.NnfPortManagerAllocationStatusInvalidConfiguration { return nil, dwsv1alpha2.NewResourceError("").WithUserMessage("could not request ports for container workflow: Invalid NnfPortManager configuration").WithFatal().WithUser() - } else if alloc.Status == nnfv1alpha3.NnfPortManagerAllocationStatusInsufficientResources { + } else if alloc.Status == nnfv1alpha4.NnfPortManagerAllocationStatusInsufficientResources { return nil, dwsv1alpha2.NewResourceError("").WithUserMessage("could not request ports for container workflow: InsufficientResources").WithFatal() } } @@ -2079,11 +2079,11 @@ func (r *NnfWorkflowReconciler) checkContainerPorts(ctx context.Context, workflo // Retrieve the default NnfPortManager for user containers. Allow a client to be passed in as this // is meant to be used by reconcilers or container helpers. 
-func getContainerPortManager(ctx context.Context, cl client.Client) (*nnfv1alpha3.NnfPortManager, error) { +func getContainerPortManager(ctx context.Context, cl client.Client) (*nnfv1alpha4.NnfPortManager, error) { portManagerName := os.Getenv("NNF_PORT_MANAGER_NAME") portManagerNamespace := os.Getenv("NNF_PORT_MANAGER_NAMESPACE") - pm := &nnfv1alpha3.NnfPortManager{ + pm := &nnfv1alpha4.NnfPortManager{ ObjectMeta: metav1.ObjectMeta{ Name: portManagerName, Namespace: portManagerNamespace, @@ -2112,7 +2112,7 @@ func (r *NnfWorkflowReconciler) releaseContainerPorts(ctx context.Context, workf // Find the allocation in the Status for _, alloc := range pm.Status.Allocations { - if alloc.Requester.UID == workflow.UID && alloc.Status == nnfv1alpha3.NnfPortManagerAllocationStatusInUse { + if alloc.Requester.UID == workflow.UID && alloc.Status == nnfv1alpha4.NnfPortManagerAllocationStatusInUse { found = true break } diff --git a/internal/controller/nnf_workflow_controller_helpers_test.go b/internal/controller/nnf_workflow_controller_helpers_test.go index 27ec1705..6e1ba033 100644 --- a/internal/controller/nnf_workflow_controller_helpers_test.go +++ b/internal/controller/nnf_workflow_controller_helpers_test.go @@ -4,7 +4,7 @@ import ( "reflect" lusv1beta1 "github.com/NearNodeFlash/lustre-fs-operator/api/v1beta1" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" @@ -50,13 +50,13 @@ var _ = Describe("NnfWorkflowControllerHelpers", func() { DescribeTable("Test NNF filesystems (NnfAccess)", func(fsType, path, output string) { // We can hardwire these fields and assume the same mountpath/mountpathprefix, index, namespace, etc - objRef := corev1.ObjectReference{Kind: reflect.TypeOf(nnfv1alpha3.NnfStorage{}).Name()} + objRef := corev1.ObjectReference{Kind: reflect.TypeOf(nnfv1alpha4.NnfStorage{}).Name()} mntPath := "/mnt/nnf/123456-0/" idx := 0 ns := "slushy44" - access := nnfv1alpha3.NnfAccess{ - Spec: nnfv1alpha3.NnfAccessSpec{ + access := nnfv1alpha4.NnfAccess{ + Spec: nnfv1alpha4.NnfAccessSpec{ MountPath: mntPath, MountPathPrefix: mntPath, }, diff --git a/internal/controller/nnf_workflow_controller_test.go b/internal/controller/nnf_workflow_controller_test.go index 5cc53ff9..9db26945 100644 --- a/internal/controller/nnf_workflow_controller_test.go +++ b/internal/controller/nnf_workflow_controller_test.go @@ -41,7 +41,7 @@ import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" lusv1beta1 "github.com/NearNodeFlash/lustre-fs-operator/api/v1beta1" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" ) var ( @@ -58,9 +58,9 @@ var _ = Describe("NNF Workflow Unit Tests", func() { key types.NamespacedName workflow *dwsv1alpha2.Workflow setup sync.Once - storageProfile *nnfv1alpha3.NnfStorageProfile - dmProfile *nnfv1alpha3.NnfDataMovementProfile - nnfNode *nnfv1alpha3.NnfNode + storageProfile *nnfv1alpha4.NnfStorageProfile + dmProfile *nnfv1alpha4.NnfDataMovementProfile + nnfNode *nnfv1alpha4.NnfNode namespace *corev1.Namespace persistentStorageName string ) @@ -73,14 +73,14 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Expect(k8sClient.Create(context.TODO(), namespace)).To(Succeed()) - nnfNode = &nnfv1alpha3.NnfNode{ + nnfNode = &nnfv1alpha4.NnfNode{ TypeMeta: metav1.TypeMeta{}, 
ObjectMeta: metav1.ObjectMeta{ Name: "nnf-nlc", Namespace: "rabbit-node", }, - Spec: nnfv1alpha3.NnfNodeSpec{ - State: nnfv1alpha3.ResourceEnable, + Spec: nnfv1alpha4.NnfNodeSpec{ + State: nnfv1alpha4.ResourceEnable, }, } Expect(k8sClient.Create(context.TODO(), nnfNode)).To(Succeed()) @@ -145,13 +145,13 @@ var _ = Describe("NNF Workflow Unit Tests", func() { }).ShouldNot(Succeed()) Expect(k8sClient.Delete(context.TODO(), storageProfile)).To(Succeed()) - profExpected := &nnfv1alpha3.NnfStorageProfile{} + profExpected := &nnfv1alpha4.NnfStorageProfile{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storageProfile), profExpected) }).ShouldNot(Succeed()) Expect(k8sClient.Delete(context.TODO(), dmProfile)).To(Succeed()) - dmProfExpected := &nnfv1alpha3.NnfDataMovementProfile{} + dmProfExpected := &nnfv1alpha4.NnfDataMovementProfile{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dmProfile), dmProfExpected) }).ShouldNot(Succeed()) @@ -192,15 +192,15 @@ var _ = Describe("NNF Workflow Unit Tests", func() { // operate. // An alternative is to create a workflow with 'create_persistent' // as its directive and actually create the full-blown persistent instance.. 
(painful) - nnfStorage := &nnfv1alpha3.NnfStorage{ + nnfStorage := &nnfv1alpha4.NnfStorage{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: workflow.Namespace, }, - Spec: nnfv1alpha3.NnfStorageSpec{ + Spec: nnfv1alpha4.NnfStorageSpec{ FileSystemType: fsType, - AllocationSets: []nnfv1alpha3.NnfStorageAllocationSetSpec{}, + AllocationSets: []nnfv1alpha4.NnfStorageAllocationSetSpec{}, }, } Expect(k8sClient.Create(context.TODO(), nnfStorage)).To(Succeed()) @@ -214,7 +214,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(psi), psi)).To(Succeed()) Expect(k8sClient.Delete(context.TODO(), psi)).Should(Succeed()) - nnfStorage := &nnfv1alpha3.NnfStorage{ + nnfStorage := &nnfv1alpha4.NnfStorage{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: workflow.Namespace}, } @@ -263,7 +263,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { When("More than one default profile", func() { - var storageProfile2 *nnfv1alpha3.NnfStorageProfile + var storageProfile2 *nnfv1alpha4.NnfStorageProfile BeforeEach(func() { // The second profile will get a different name via the call to uuid. @@ -273,7 +273,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { AfterEach(func() { Expect(k8sClient.Delete(context.TODO(), storageProfile2)).To(Succeed()) - profExpected := &nnfv1alpha3.NnfStorageProfile{} + profExpected := &nnfv1alpha4.NnfStorageProfile{} Eventually(func() error { // Delete can still return the cached object. 
Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storageProfile2), profExpected) }).ShouldNot(Succeed()) @@ -301,7 +301,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { When("Positive tests for storage profiles", func() { - profiles := []*nnfv1alpha3.NnfStorageProfile{} + profiles := []*nnfv1alpha4.NnfStorageProfile{} profNames := []string{} BeforeEach(func() { @@ -436,7 +436,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { When("More than one default profile", func() { - var dmProfile2 *nnfv1alpha3.NnfDataMovementProfile + var dmProfile2 *nnfv1alpha4.NnfDataMovementProfile BeforeEach(func() { // The second profile will get a different name via the call to uuid. @@ -446,7 +446,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { AfterEach(func() { Expect(k8sClient.Delete(context.TODO(), dmProfile2)).To(Succeed()) - profExpected := &nnfv1alpha3.NnfDataMovementProfile{} + profExpected := &nnfv1alpha4.NnfDataMovementProfile{} Eventually(func() error { // Delete can still return the cached object. 
Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dmProfile2), profExpected) }).ShouldNot(Succeed()) @@ -474,7 +474,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { When("Positive tests for data movement profiles", func() { - profiles := []*nnfv1alpha3.NnfDataMovementProfile{} + profiles := []*nnfv1alpha4.NnfDataMovementProfile{} profNames := []string{} var lustre *lusv1beta1.LustreFileSystem @@ -665,7 +665,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { When("Using copy_in directives", func() { var ( - dmm *nnfv1alpha3.NnfDataMovementManager + dmm *nnfv1alpha4.NnfDataMovementManager ) JustBeforeEach(func() { @@ -693,18 +693,18 @@ var _ = Describe("NNF Workflow Unit Tests", func() { BeforeEach(func() { ns := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Name: nnfv1alpha3.DataMovementNamespace, + Name: nnfv1alpha4.DataMovementNamespace, }, } k8sClient.Create(context.TODO(), ns) // Ignore errors as namespace may be created from other tests - dmm = &nnfv1alpha3.NnfDataMovementManager{ + dmm = &nnfv1alpha4.NnfDataMovementManager{ ObjectMeta: metav1.ObjectMeta{ - Name: nnfv1alpha3.DataMovementManagerName, - Namespace: nnfv1alpha3.DataMovementNamespace, + Name: nnfv1alpha4.DataMovementManagerName, + Namespace: nnfv1alpha4.DataMovementNamespace, }, - Spec: nnfv1alpha3.NnfDataMovementManagerSpec{ + Spec: nnfv1alpha4.NnfDataMovementManagerSpec{ Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ Containers: []corev1.Container{{ @@ -714,7 +714,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { }, }, }, - Status: nnfv1alpha3.NnfDataMovementManagerStatus{ + Status: nnfv1alpha4.NnfDataMovementManagerStatus{ Ready: true, }, } @@ -766,10 +766,10 @@ var _ = Describe("NNF Workflow Unit Tests", func() { }).Should(Succeed(), "update to DataIn") By("creates the data movement resource") - dm := &nnfv1alpha3.NnfDataMovement{ + dm := &nnfv1alpha4.NnfDataMovement{ ObjectMeta: metav1.ObjectMeta{ 
Name: fmt.Sprintf("%s-%d", workflow.Name, 1), - Namespace: nnfv1alpha3.DataMovementNamespace, + Namespace: nnfv1alpha4.DataMovementNamespace, }, } @@ -790,19 +790,19 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Expect(dm.Spec.Destination.StorageReference).ToNot(BeNil()) Expect(dm.Spec.Destination.StorageReference).To(MatchFields(IgnoreExtras, Fields{ - "Kind": Equal(reflect.TypeOf(nnfv1alpha3.NnfStorage{}).Name()), + "Kind": Equal(reflect.TypeOf(nnfv1alpha4.NnfStorage{}).Name()), "Name": Equal(fmt.Sprintf("%s-%d", workflow.Name, 0)), "Namespace": Equal(workflow.Namespace), })) Expect(dm.Spec.ProfileReference).To(MatchFields(IgnoreExtras, Fields{ - "Kind": Equal(reflect.TypeOf(nnfv1alpha3.NnfDataMovementProfile{}).Name()), + "Kind": Equal(reflect.TypeOf(nnfv1alpha4.NnfDataMovementProfile{}).Name()), "Name": Equal(indexedResourceName(workflow, 1)), "Namespace": Equal(corev1.NamespaceDefault), }, )) - Expect(dm.GetLabels()[nnfv1alpha3.DataMovementInitiatorLabel]).To(Equal("copy_in")) + Expect(dm.GetLabels()[nnfv1alpha4.DataMovementInitiatorLabel]).To(Equal("copy_in")) }) }) @@ -846,10 +846,10 @@ var _ = Describe("NNF Workflow Unit Tests", func() { return k8sClient.Update(context.TODO(), workflow) }).Should(Succeed(), "transition desired state to DataIn") - dm := &nnfv1alpha3.NnfDataMovement{ + dm := &nnfv1alpha4.NnfDataMovement{ ObjectMeta: metav1.ObjectMeta{ Name: indexedResourceName(workflow, 1), - Namespace: nnfv1alpha3.DataMovementNamespace, + Namespace: nnfv1alpha4.DataMovementNamespace, }, } @@ -870,18 +870,18 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Expect(dm.Spec.Destination.StorageReference).ToNot(BeNil()) Expect(dm.Spec.Destination.StorageReference).To(MatchFields(IgnoreExtras, Fields{ - "Kind": Equal(reflect.TypeOf(nnfv1alpha3.NnfStorage{}).Name()), + "Kind": Equal(reflect.TypeOf(nnfv1alpha4.NnfStorage{}).Name()), "Name": Equal(persistentStorageName), "Namespace": Equal(workflow.Namespace), })) 
Expect(dm.Spec.ProfileReference).To(MatchFields(IgnoreExtras, Fields{ - "Kind": Equal(reflect.TypeOf(nnfv1alpha3.NnfDataMovementProfile{}).Name()), + "Kind": Equal(reflect.TypeOf(nnfv1alpha4.NnfDataMovementProfile{}).Name()), "Name": Equal(indexedResourceName(workflow, 1)), "Namespace": Equal(corev1.NamespaceDefault), }, )) - Expect(dm.GetLabels()[nnfv1alpha3.DataMovementInitiatorLabel]).To(Equal("copy_in")) + Expect(dm.GetLabels()[nnfv1alpha4.DataMovementInitiatorLabel]).To(Equal("copy_in")) }) }) @@ -1283,7 +1283,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { return workflow.Status.Ready && workflow.Status.State == dwsv1alpha2.StateSetup }).Should(BeTrue(), "waiting for ready after setup") - nnfStorage := &nnfv1alpha3.NnfStorage{ + nnfStorage := &nnfv1alpha4.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: servers.Name, Namespace: servers.Namespace, @@ -1365,8 +1365,8 @@ var _ = Describe("NNF Workflow Unit Tests", func() { createGlobalLustre bool globalLustre *lusv1beta1.LustreFileSystem - containerProfile *nnfv1alpha3.NnfContainerProfile - containerProfileStorages []nnfv1alpha3.NnfContainerProfileStorage + containerProfile *nnfv1alpha4.NnfContainerProfile + containerProfileStorages []nnfv1alpha4.NnfContainerProfileStorage createContainerProfile bool ) @@ -1503,7 +1503,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Context("when an optional storage in the container profile is not present in the container arguments", func() { BeforeEach(func() { - containerProfileStorages = []nnfv1alpha3.NnfContainerProfileStorage{ + containerProfileStorages = []nnfv1alpha4.NnfContainerProfileStorage{ {Name: "DW_JOB_foo_local_storage", Optional: false}, {Name: "DW_PERSISTENT_foo_persistent_storage", Optional: true}, {Name: "DW_GLOBAL_foo_global_lustre", Optional: true}, @@ -1548,7 +1548,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Context("when a required storage in the container profile is not present in the arguments", func() { 
BeforeEach(func() { - containerProfileStorages = []nnfv1alpha3.NnfContainerProfileStorage{ + containerProfileStorages = []nnfv1alpha4.NnfContainerProfileStorage{ {Name: "DW_JOB_foo_local_storage", Optional: false}, {Name: "DW_PERSISTENT_foo_persistent_storage", Optional: true}, } @@ -1592,7 +1592,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { } }) - buildContainerProfile := func(storages []nnfv1alpha3.NnfContainerProfileStorage) { + buildContainerProfile := func(storages []nnfv1alpha4.NnfContainerProfileStorage) { By("Creating a profile with specific storages") tempProfile := basicNnfContainerProfile("restricted-"+uuid.NewString()[:8], storages) containerProfile = createNnfContainerProfile(tempProfile, true) @@ -1609,7 +1609,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { } DescribeTable("should not go to Proposal Ready", - func(argIdx int, storages []nnfv1alpha3.NnfContainerProfileStorage) { + func(argIdx int, storages []nnfv1alpha4.NnfContainerProfileStorage) { buildContainerProfile(storages) buildContainerWorkflowWithArgs(storageArgsList[argIdx]) Eventually(func(g Gomega) bool { @@ -1620,19 +1620,19 @@ var _ = Describe("NNF Workflow Unit Tests", func() { }, Entry("when DW_JOB_ not present in the container profile", 0, - []nnfv1alpha3.NnfContainerProfileStorage{ + []nnfv1alpha4.NnfContainerProfileStorage{ {Name: "DW_PERSISTENT_foo_persistent_storage", Optional: true}, {Name: "DW_GLOBAL_foo_global_lustre", Optional: true}, }, ), Entry("when DW_PERSISTENT_ not present in the container profile", 1, - []nnfv1alpha3.NnfContainerProfileStorage{ + []nnfv1alpha4.NnfContainerProfileStorage{ {Name: "DW_JOB_foo_local_storage", Optional: true}, {Name: "DW_GLOBAL_foo_global_lustre", Optional: true}, }, ), Entry("when DW_GLOBAL_ not present in the container profile", 2, - []nnfv1alpha3.NnfContainerProfileStorage{ + []nnfv1alpha4.NnfContainerProfileStorage{ {Name: "DW_JOB_foo_local_storage", Optional: true}, {Name: "DW_PERSISTENT_foo_persistent_storage", 
Optional: true}, }, @@ -1738,7 +1738,7 @@ var _ = Describe("NnfStorageProfile Webhook test", func() { }) }) -func WaitForDMMReady(dmm *nnfv1alpha3.NnfDataMovementManager) { +func WaitForDMMReady(dmm *nnfv1alpha4.NnfDataMovementManager) { Eventually(func(g Gomega) bool { g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dmm), dmm)).To(Succeed()) if !dmm.Status.Ready { diff --git a/internal/controller/nnfcontainerprofile_helpers.go b/internal/controller/nnfcontainerprofile_helpers.go index 352b5efe..57e90de7 100644 --- a/internal/controller/nnfcontainerprofile_helpers.go +++ b/internal/controller/nnfcontainerprofile_helpers.go @@ -32,11 +32,11 @@ import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/dwdparse" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" "github.com/go-logr/logr" ) -func getContainerProfile(ctx context.Context, clnt client.Client, workflow *dwsv1alpha2.Workflow, index int) (*nnfv1alpha3.NnfContainerProfile, error) { +func getContainerProfile(ctx context.Context, clnt client.Client, workflow *dwsv1alpha2.Workflow, index int) (*nnfv1alpha4.NnfContainerProfile, error) { profile, err := findPinnedContainerProfile(ctx, clnt, workflow, index) if err != nil { return nil, err @@ -49,8 +49,8 @@ func getContainerProfile(ctx context.Context, clnt client.Client, workflow *dwsv return profile, nil } -func findPinnedContainerProfile(ctx context.Context, clnt client.Client, workflow *dwsv1alpha2.Workflow, index int) (*nnfv1alpha3.NnfContainerProfile, error) { - profile := &nnfv1alpha3.NnfContainerProfile{ +func findPinnedContainerProfile(ctx context.Context, clnt client.Client, workflow *dwsv1alpha2.Workflow, index int) (*nnfv1alpha4.NnfContainerProfile, error) { + profile := &nnfv1alpha4.NnfContainerProfile{ ObjectMeta: metav1.ObjectMeta{ Name: indexedResourceName(workflow, index), Namespace: workflow.Namespace, 
@@ -68,7 +68,7 @@ func findPinnedContainerProfile(ctx context.Context, clnt client.Client, workflo return profile, nil } -func findContainerProfile(ctx context.Context, clnt client.Client, workflow *dwsv1alpha2.Workflow, index int) (*nnfv1alpha3.NnfContainerProfile, error) { +func findContainerProfile(ctx context.Context, clnt client.Client, workflow *dwsv1alpha2.Workflow, index int) (*nnfv1alpha4.NnfContainerProfile, error) { args, err := dwdparse.BuildArgsMap(workflow.Spec.DWDirectives[index]) if err != nil { return nil, err @@ -79,7 +79,7 @@ func findContainerProfile(ctx context.Context, clnt client.Client, workflow *dws return nil, fmt.Errorf("container directive '%s' has no profile key", workflow.Spec.DWDirectives[index]) } - profile := &nnfv1alpha3.NnfContainerProfile{ + profile := &nnfv1alpha4.NnfContainerProfile{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: os.Getenv("NNF_CONTAINER_PROFILE_NAMESPACE"), @@ -121,7 +121,7 @@ func createPinnedContainerProfileIfNecessary(ctx context.Context, clnt client.Cl return err } - pinnedProfile := &nnfv1alpha3.NnfContainerProfile{ + pinnedProfile := &nnfv1alpha4.NnfContainerProfile{ ObjectMeta: metav1.ObjectMeta{ Name: indexedResourceName(workflow, index), Namespace: workflow.Namespace, diff --git a/internal/controller/nnfcontainerprofile_test.go b/internal/controller/nnfcontainerprofile_test.go index 4b2536be..655ac04d 100644 --- a/internal/controller/nnfcontainerprofile_test.go +++ b/internal/controller/nnfcontainerprofile_test.go @@ -31,17 +31,17 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" ) // createNnfContainerProfile creates the given profile in the "default" namespace. // When expectSuccess=false, we expect to find that it was failed by the webhook. 
-func createNnfContainerProfile(containerProfile *nnfv1alpha3.NnfContainerProfile, expectSuccess bool) *nnfv1alpha3.NnfContainerProfile { +func createNnfContainerProfile(containerProfile *nnfv1alpha4.NnfContainerProfile, expectSuccess bool) *nnfv1alpha4.NnfContainerProfile { // Place NnfContainerProfiles in "default" for the test environment. containerProfile.ObjectMeta.Namespace = corev1.NamespaceDefault profKey := client.ObjectKeyFromObject(containerProfile) - profExpected := &nnfv1alpha3.NnfContainerProfile{} + profExpected := &nnfv1alpha4.NnfContainerProfile{} err := k8sClient.Get(context.TODO(), profKey, profExpected) Expect(err).ToNot(BeNil()) Expect(apierrors.IsNotFound(err)).To(BeTrue()) @@ -62,22 +62,22 @@ func createNnfContainerProfile(containerProfile *nnfv1alpha3.NnfContainerProfile } // basicNnfContainerProfile creates a simple NnfContainerProfile struct. -func basicNnfContainerProfile(name string, storages []nnfv1alpha3.NnfContainerProfileStorage) *nnfv1alpha3.NnfContainerProfile { +func basicNnfContainerProfile(name string, storages []nnfv1alpha4.NnfContainerProfileStorage) *nnfv1alpha4.NnfContainerProfile { // default storages if not supplied, optional by default if len(storages) == 0 { - storages = []nnfv1alpha3.NnfContainerProfileStorage{ + storages = []nnfv1alpha4.NnfContainerProfileStorage{ {Name: "DW_JOB_foo_local_storage", Optional: true}, {Name: "DW_PERSISTENT_foo_persistent_storage", Optional: true}, {Name: "DW_GLOBAL_foo_global_lustre", Optional: true}, } } - containerProfile := &nnfv1alpha3.NnfContainerProfile{ + containerProfile := &nnfv1alpha4.NnfContainerProfile{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, - Data: nnfv1alpha3.NnfContainerProfileData{ + Data: nnfv1alpha4.NnfContainerProfileData{ Pinned: false, Storages: storages, Spec: &corev1.PodSpec{ @@ -92,7 +92,7 @@ func basicNnfContainerProfile(name string, storages []nnfv1alpha3.NnfContainerPr } // createBasicNnfContainerProfile creates a simple default container profile. 
-func createBasicNnfContainerProfile(storages []nnfv1alpha3.NnfContainerProfileStorage) *nnfv1alpha3.NnfContainerProfile { +func createBasicNnfContainerProfile(storages []nnfv1alpha4.NnfContainerProfileStorage) *nnfv1alpha4.NnfContainerProfile { containerProfile := basicNnfContainerProfile("sample-"+uuid.NewString()[:8], storages) return createNnfContainerProfile(containerProfile, true) } diff --git a/internal/controller/nnfdatamovementprofile_helpers.go b/internal/controller/nnfdatamovementprofile_helpers.go index a10b65ff..180b911c 100644 --- a/internal/controller/nnfdatamovementprofile_helpers.go +++ b/internal/controller/nnfdatamovementprofile_helpers.go @@ -32,14 +32,14 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" ) // findProfileToUse verifies a NnfDataMovementProfile named in the directive or verifies that a default can be found. -func findDMProfileToUse(ctx context.Context, clnt client.Client, args map[string]string) (*nnfv1alpha3.NnfDataMovementProfile, error) { +func findDMProfileToUse(ctx context.Context, clnt client.Client, args map[string]string) (*nnfv1alpha4.NnfDataMovementProfile, error) { var profileName string - NnfDataMovementProfile := &nnfv1alpha3.NnfDataMovementProfile{} + NnfDataMovementProfile := &nnfv1alpha4.NnfDataMovementProfile{} profileNamespace := os.Getenv("NNF_DM_PROFILE_NAMESPACE") @@ -47,7 +47,7 @@ func findDMProfileToUse(ctx context.Context, clnt client.Client, args map[string // that a default profile can be found. 
profileName, present := args["profile"] if present == false { - NnfDataMovementProfiles := &nnfv1alpha3.NnfDataMovementProfileList{} + NnfDataMovementProfiles := &nnfv1alpha4.NnfDataMovementProfileList{} if err := clnt.List(ctx, NnfDataMovementProfiles, &client.ListOptions{Namespace: profileNamespace}); err != nil { return nil, err } @@ -78,9 +78,9 @@ func findDMProfileToUse(ctx context.Context, clnt client.Client, args map[string } // findPinnedProfile finds the specified pinned profile. -func findPinnedDMProfile(ctx context.Context, clnt client.Client, namespace string, pinnedName string) (*nnfv1alpha3.NnfDataMovementProfile, error) { +func findPinnedDMProfile(ctx context.Context, clnt client.Client, namespace string, pinnedName string) (*nnfv1alpha4.NnfDataMovementProfile, error) { - NnfDataMovementProfile := &nnfv1alpha3.NnfDataMovementProfile{} + NnfDataMovementProfile := &nnfv1alpha4.NnfDataMovementProfile{} err := clnt.Get(ctx, types.NamespacedName{Namespace: namespace, Name: pinnedName}, NnfDataMovementProfile) if err != nil { return nil, err @@ -92,7 +92,7 @@ func findPinnedDMProfile(ctx context.Context, clnt client.Client, namespace stri } // createPinnedProfile finds the specified profile and makes a pinned copy of it. -func createPinnedDMProfile(ctx context.Context, clnt client.Client, clntScheme *runtime.Scheme, args map[string]string, owner metav1.Object, pinnedName string) (*nnfv1alpha3.NnfDataMovementProfile, error) { +func createPinnedDMProfile(ctx context.Context, clnt client.Client, clntScheme *runtime.Scheme, args map[string]string, owner metav1.Object, pinnedName string) (*nnfv1alpha4.NnfDataMovementProfile, error) { // If we've already pinned a profile, then we're done and // we no longer have a use for the original profile. 
diff --git a/internal/controller/nnfdatamovementprofile_test.go b/internal/controller/nnfdatamovementprofile_test.go index 0c93e6d8..5aef830d 100644 --- a/internal/controller/nnfdatamovementprofile_test.go +++ b/internal/controller/nnfdatamovementprofile_test.go @@ -30,17 +30,17 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" ) // createNnfDataMovementProfile creates the given profile in the "default" namespace. // When expectSuccess=false, we expect to find that it was failed by the webhook. -func createNnfDataMovementProfile(DataMovementProfile *nnfv1alpha3.NnfDataMovementProfile, expectSuccess bool) *nnfv1alpha3.NnfDataMovementProfile { +func createNnfDataMovementProfile(DataMovementProfile *nnfv1alpha4.NnfDataMovementProfile, expectSuccess bool) *nnfv1alpha4.NnfDataMovementProfile { // Place NnfDataMovementProfiles in "default" for the test environment. DataMovementProfile.ObjectMeta.Namespace = corev1.NamespaceDefault profKey := client.ObjectKeyFromObject(DataMovementProfile) - profExpected := &nnfv1alpha3.NnfDataMovementProfile{} + profExpected := &nnfv1alpha4.NnfDataMovementProfile{} err := k8sClient.Get(context.TODO(), profKey, profExpected) Expect(err).ToNot(BeNil()) Expect(apierrors.IsNotFound(err)).To(BeTrue()) @@ -61,8 +61,8 @@ func createNnfDataMovementProfile(DataMovementProfile *nnfv1alpha3.NnfDataMoveme } // basicNnfDataMovementProfile creates a simple NnfDataMovementProfile struct. 
-func basicNnfDataMovementProfile(name string) *nnfv1alpha3.NnfDataMovementProfile { - DataMovementProfile := &nnfv1alpha3.NnfDataMovementProfile{ +func basicNnfDataMovementProfile(name string) *nnfv1alpha4.NnfDataMovementProfile { + DataMovementProfile := &nnfv1alpha4.NnfDataMovementProfile{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, @@ -71,14 +71,14 @@ func basicNnfDataMovementProfile(name string) *nnfv1alpha3.NnfDataMovementProfil } // createBasicDefaultNnfDataMovementProfile creates a simple default storage profile. -func createBasicDefaultNnfDataMovementProfile() *nnfv1alpha3.NnfDataMovementProfile { +func createBasicDefaultNnfDataMovementProfile() *nnfv1alpha4.NnfDataMovementProfile { DataMovementProfile := basicNnfDataMovementProfile("durable-" + uuid.NewString()[:8]) DataMovementProfile.Data.Default = true return createNnfDataMovementProfile(DataMovementProfile, true) } // createBasicDefaultNnfDataMovementProfile creates a simple default storage profile. -func createBasicPinnedNnfDataMovementProfile() *nnfv1alpha3.NnfDataMovementProfile { +func createBasicPinnedNnfDataMovementProfile() *nnfv1alpha4.NnfDataMovementProfile { DataMovementProfile := basicNnfDataMovementProfile("durable-" + uuid.NewString()[:8]) DataMovementProfile.Data.Pinned = true return createNnfDataMovementProfile(DataMovementProfile, true) diff --git a/internal/controller/nnfstorageprofile_helpers.go b/internal/controller/nnfstorageprofile_helpers.go index 70ed33e1..a407f86d 100644 --- a/internal/controller/nnfstorageprofile_helpers.go +++ b/internal/controller/nnfstorageprofile_helpers.go @@ -32,14 +32,14 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" ) // findProfileToUse verifies a NnfStorageProfile named in the directive or verifies that a default can be found. 
-func findProfileToUse(ctx context.Context, clnt client.Client, args map[string]string) (*nnfv1alpha3.NnfStorageProfile, error) { +func findProfileToUse(ctx context.Context, clnt client.Client, args map[string]string) (*nnfv1alpha4.NnfStorageProfile, error) { var profileName string - nnfStorageProfile := &nnfv1alpha3.NnfStorageProfile{} + nnfStorageProfile := &nnfv1alpha4.NnfStorageProfile{} profileNamespace := os.Getenv("NNF_STORAGE_PROFILE_NAMESPACE") @@ -47,7 +47,7 @@ func findProfileToUse(ctx context.Context, clnt client.Client, args map[string]s // that a default profile can be found. profileName, present := args["profile"] if present == false { - nnfStorageProfiles := &nnfv1alpha3.NnfStorageProfileList{} + nnfStorageProfiles := &nnfv1alpha4.NnfStorageProfileList{} if err := clnt.List(ctx, nnfStorageProfiles, &client.ListOptions{Namespace: profileNamespace}); err != nil { return nil, err } @@ -78,9 +78,9 @@ func findProfileToUse(ctx context.Context, clnt client.Client, args map[string]s } // findPinnedProfile finds the specified pinned profile. -func findPinnedProfile(ctx context.Context, clnt client.Client, namespace string, pinnedName string) (*nnfv1alpha3.NnfStorageProfile, error) { +func findPinnedProfile(ctx context.Context, clnt client.Client, namespace string, pinnedName string) (*nnfv1alpha4.NnfStorageProfile, error) { - nnfStorageProfile := &nnfv1alpha3.NnfStorageProfile{} + nnfStorageProfile := &nnfv1alpha4.NnfStorageProfile{} err := clnt.Get(ctx, types.NamespacedName{Namespace: namespace, Name: pinnedName}, nnfStorageProfile) if err != nil { return nil, err @@ -92,7 +92,7 @@ func findPinnedProfile(ctx context.Context, clnt client.Client, namespace string } // createPinnedProfile finds the specified profile and makes a pinned copy of it. 
-func createPinnedProfile(ctx context.Context, clnt client.Client, clntScheme *runtime.Scheme, args map[string]string, owner metav1.Object, pinnedName string) (*nnfv1alpha3.NnfStorageProfile, error) { +func createPinnedProfile(ctx context.Context, clnt client.Client, clntScheme *runtime.Scheme, args map[string]string, owner metav1.Object, pinnedName string) (*nnfv1alpha4.NnfStorageProfile, error) { // If we've already pinned a profile, then we're done and // we no longer have a use for the original profile. @@ -134,32 +134,32 @@ func createPinnedProfile(ctx context.Context, clnt client.Client, clntScheme *ru // addPinnedStorageProfileLabel adds name/namespace labels to a resource to indicate // which pinned storage profile is being used with that resource. -func addPinnedStorageProfileLabel(object metav1.Object, nnfStorageProfile *nnfv1alpha3.NnfStorageProfile) { +func addPinnedStorageProfileLabel(object metav1.Object, nnfStorageProfile *nnfv1alpha4.NnfStorageProfile) { labels := object.GetLabels() if labels == nil { labels = make(map[string]string) } - labels[nnfv1alpha3.PinnedStorageProfileLabelName] = nnfStorageProfile.GetName() - labels[nnfv1alpha3.PinnedStorageProfileLabelNameSpace] = nnfStorageProfile.GetNamespace() + labels[nnfv1alpha4.PinnedStorageProfileLabelName] = nnfStorageProfile.GetName() + labels[nnfv1alpha4.PinnedStorageProfileLabelNameSpace] = nnfStorageProfile.GetNamespace() object.SetLabels(labels) } // getPinnedStorageProfileFromLabel finds the pinned storage profile via the labels on the // specified resource. 
-func getPinnedStorageProfileFromLabel(ctx context.Context, clnt client.Client, object metav1.Object) (*nnfv1alpha3.NnfStorageProfile, error) { +func getPinnedStorageProfileFromLabel(ctx context.Context, clnt client.Client, object metav1.Object) (*nnfv1alpha4.NnfStorageProfile, error) { labels := object.GetLabels() if labels == nil { return nil, dwsv1alpha2.NewResourceError("unable to find labels").WithFatal() } - pinnedName, okName := labels[nnfv1alpha3.PinnedStorageProfileLabelName] + pinnedName, okName := labels[nnfv1alpha4.PinnedStorageProfileLabelName] if !okName { - return nil, dwsv1alpha2.NewResourceError("unable to find %s label", nnfv1alpha3.PinnedStorageProfileLabelName).WithFatal() + return nil, dwsv1alpha2.NewResourceError("unable to find %s label", nnfv1alpha4.PinnedStorageProfileLabelName).WithFatal() } - pinnedNamespace, okNamespace := labels[nnfv1alpha3.PinnedStorageProfileLabelNameSpace] + pinnedNamespace, okNamespace := labels[nnfv1alpha4.PinnedStorageProfileLabelNameSpace] if !okNamespace { - return nil, dwsv1alpha2.NewResourceError("unable to find %s label", nnfv1alpha3.PinnedStorageProfileLabelNameSpace).WithFatal() + return nil, dwsv1alpha2.NewResourceError("unable to find %s label", nnfv1alpha4.PinnedStorageProfileLabelNameSpace).WithFatal() } return findPinnedProfile(ctx, clnt, pinnedNamespace, pinnedName) diff --git a/internal/controller/nnfstorageprofile_test.go b/internal/controller/nnfstorageprofile_test.go index acf44ff1..a6a78022 100644 --- a/internal/controller/nnfstorageprofile_test.go +++ b/internal/controller/nnfstorageprofile_test.go @@ -30,17 +30,17 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" ) // createNnfStorageProfile creates the given profile in the "default" namespace. 
// When expectSuccess=false, we expect to find that it was failed by the webhook. -func createNnfStorageProfile(storageProfile *nnfv1alpha3.NnfStorageProfile, expectSuccess bool) *nnfv1alpha3.NnfStorageProfile { +func createNnfStorageProfile(storageProfile *nnfv1alpha4.NnfStorageProfile, expectSuccess bool) *nnfv1alpha4.NnfStorageProfile { // Place NnfStorageProfiles in "default" for the test environment. storageProfile.ObjectMeta.Namespace = corev1.NamespaceDefault profKey := client.ObjectKeyFromObject(storageProfile) - profExpected := &nnfv1alpha3.NnfStorageProfile{} + profExpected := &nnfv1alpha4.NnfStorageProfile{} err := k8sClient.Get(context.TODO(), profKey, profExpected) Expect(err).ToNot(BeNil()) Expect(apierrors.IsNotFound(err)).To(BeTrue()) @@ -61,8 +61,8 @@ func createNnfStorageProfile(storageProfile *nnfv1alpha3.NnfStorageProfile, expe } // basicNnfStorageProfile creates a simple NnfStorageProfile struct. -func basicNnfStorageProfile(name string) *nnfv1alpha3.NnfStorageProfile { - storageProfile := &nnfv1alpha3.NnfStorageProfile{ +func basicNnfStorageProfile(name string) *nnfv1alpha4.NnfStorageProfile { + storageProfile := &nnfv1alpha4.NnfStorageProfile{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, @@ -71,14 +71,14 @@ func basicNnfStorageProfile(name string) *nnfv1alpha3.NnfStorageProfile { } // createBasicDefaultNnfStorageProfile creates a simple default storage profile. -func createBasicDefaultNnfStorageProfile() *nnfv1alpha3.NnfStorageProfile { +func createBasicDefaultNnfStorageProfile() *nnfv1alpha4.NnfStorageProfile { storageProfile := basicNnfStorageProfile("durable-" + uuid.NewString()[:8]) storageProfile.Data.Default = true return createNnfStorageProfile(storageProfile, true) } // createBasicDefaultNnfStorageProfile creates a simple default storage profile. 
-func createBasicPinnedNnfStorageProfile() *nnfv1alpha3.NnfStorageProfile { +func createBasicPinnedNnfStorageProfile() *nnfv1alpha4.NnfStorageProfile { storageProfile := basicNnfStorageProfile("durable-" + uuid.NewString()[:8]) storageProfile.Data.Pinned = true return createNnfStorageProfile(storageProfile, true) diff --git a/internal/controller/nnfsystemstorage_controller.go b/internal/controller/nnfsystemstorage_controller.go index a95517db..8587d618 100644 --- a/internal/controller/nnfsystemstorage_controller.go +++ b/internal/controller/nnfsystemstorage_controller.go @@ -39,7 +39,7 @@ import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/updater" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" ) @@ -71,7 +71,7 @@ func (r *NnfSystemStorageReconciler) Reconcile(ctx context.Context, req ctrl.Req metrics.NnfSystemStorageReconcilesTotal.Inc() - nnfSystemStorage := &nnfv1alpha3.NnfSystemStorage{} + nnfSystemStorage := &nnfv1alpha4.NnfSystemStorage{} if err := r.Get(ctx, req.NamespacedName, nnfSystemStorage); err != nil { // ignore not-found errors, since they can't be fixed by an immediate // requeue (we'll need to wait for a new notification), and we can get them @@ -79,7 +79,7 @@ func (r *NnfSystemStorageReconciler) Reconcile(ctx context.Context, req ctrl.Req return ctrl.Result{}, client.IgnoreNotFound(err) } - statusUpdater := updater.NewStatusUpdater[*nnfv1alpha3.NnfSystemStorageStatus](nnfSystemStorage) + statusUpdater := updater.NewStatusUpdater[*nnfv1alpha4.NnfSystemStorageStatus](nnfSystemStorage) defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() defer func() { nnfSystemStorage.Status.SetResourceErrorAndLog(err, log) }() @@ -164,7 +164,7 @@ func (r *NnfSystemStorageReconciler) Reconcile(ctx context.Context, req ctrl.Req // 
Get the SystemConfiguration. If a SystemConfiguration is specified in the NnfSystemStorage, use that. // Otherwise, use the default/default SystemConfiguration. -func (r *NnfSystemStorageReconciler) getSystemConfiguration(ctx context.Context, nnfSystemStorage *nnfv1alpha3.NnfSystemStorage) (*dwsv1alpha2.SystemConfiguration, error) { +func (r *NnfSystemStorageReconciler) getSystemConfiguration(ctx context.Context, nnfSystemStorage *nnfv1alpha4.NnfSystemStorage) (*dwsv1alpha2.SystemConfiguration, error) { systemConfiguration := &dwsv1alpha2.SystemConfiguration{} if nnfSystemStorage.Spec.SystemConfiguration != (corev1.ObjectReference{}) { @@ -192,16 +192,16 @@ func (r *NnfSystemStorageReconciler) getSystemConfiguration(ctx context.Context, // Get the StorageProfile specified in the spec. We don't look for the default profile, a profile must be // specified in the NnfSystemStorage spec, and it must be marked as pinned. -func (r *NnfSystemStorageReconciler) getStorageProfile(ctx context.Context, nnfSystemStorage *nnfv1alpha3.NnfSystemStorage) (*nnfv1alpha3.NnfStorageProfile, error) { +func (r *NnfSystemStorageReconciler) getStorageProfile(ctx context.Context, nnfSystemStorage *nnfv1alpha4.NnfSystemStorage) (*nnfv1alpha4.NnfStorageProfile, error) { if nnfSystemStorage.Spec.StorageProfile == (corev1.ObjectReference{}) { return nil, dwsv1alpha2.NewResourceError("StorageProfile must be specified").WithFatal() } - if nnfSystemStorage.Spec.StorageProfile.Kind != reflect.TypeOf(nnfv1alpha3.NnfStorageProfile{}).Name() { - return nil, dwsv1alpha2.NewResourceError("StorageProfile is not of kind '%s'", reflect.TypeOf(nnfv1alpha3.NnfStorageProfile{}).Name()).WithFatal() + if nnfSystemStorage.Spec.StorageProfile.Kind != reflect.TypeOf(nnfv1alpha4.NnfStorageProfile{}).Name() { + return nil, dwsv1alpha2.NewResourceError("StorageProfile is not of kind '%s'", reflect.TypeOf(nnfv1alpha4.NnfStorageProfile{}).Name()).WithFatal() } - storageProfile := &nnfv1alpha3.NnfStorageProfile{ + 
storageProfile := &nnfv1alpha4.NnfStorageProfile{ ObjectMeta: metav1.ObjectMeta{ Name: nnfSystemStorage.Spec.StorageProfile.Name, Namespace: nnfSystemStorage.Spec.StorageProfile.Namespace, @@ -218,7 +218,7 @@ func (r *NnfSystemStorageReconciler) getStorageProfile(ctx context.Context, nnfS // Create a Servers resource with one allocation on each Rabbit. If the IncludeRabbits array is not // empty, only use those Rabbits. Otherwise, use all the Rabbits in the SystemConfiguration resource except // those specified in the ExcludeRabbits array. -func (r *NnfSystemStorageReconciler) createServers(ctx context.Context, nnfSystemStorage *nnfv1alpha3.NnfSystemStorage) error { +func (r *NnfSystemStorageReconciler) createServers(ctx context.Context, nnfSystemStorage *nnfv1alpha4.NnfSystemStorage) error { log := r.Log.WithValues("NnfSystemStorage", client.ObjectKeyFromObject(nnfSystemStorage)) // Create a list of Rabbits to use @@ -338,7 +338,7 @@ func (r *NnfSystemStorageReconciler) createServers(ctx context.Context, nnfSyste // in the servers resource and exclude any computes listed in ExcludeComputes. Additionally, the ComputesTarget field determines // which of the Rabbits computes to include: all, even, odd, or a custom list. This is done using the index of the compute node // in the SystemConfiguration. 
-func (r *NnfSystemStorageReconciler) createComputes(ctx context.Context, nnfSystemStorage *nnfv1alpha3.NnfSystemStorage) error { +func (r *NnfSystemStorageReconciler) createComputes(ctx context.Context, nnfSystemStorage *nnfv1alpha4.NnfSystemStorage) error { log := r.Log.WithValues("NnfSystemStorage", client.ObjectKeyFromObject(nnfSystemStorage)) // Get a list of compute nodes to use @@ -375,13 +375,13 @@ func (r *NnfSystemStorageReconciler) createComputes(ctx context.Context, nnfSyst // Make a list of compute node index values based on the ComputesTarget field var indexList []int switch nnfSystemStorage.Spec.ComputesTarget { - case nnfv1alpha3.ComputesTargetAll: + case nnfv1alpha4.ComputesTargetAll: indexList = []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} - case nnfv1alpha3.ComputesTargetEven: + case nnfv1alpha4.ComputesTargetEven: indexList = []int{0, 2, 4, 6, 8, 10, 12, 14} - case nnfv1alpha3.ComputesTargetOdd: + case nnfv1alpha4.ComputesTargetOdd: indexList = []int{1, 3, 5, 7, 9, 11, 13, 15} - case nnfv1alpha3.ComputesTargetPattern: + case nnfv1alpha4.ComputesTargetPattern: indexList = append([]int(nil), nnfSystemStorage.Spec.ComputesPattern...) 
default: return dwsv1alpha2.NewResourceError("undexpected ComputesTarget type '%s'", nnfSystemStorage.Spec.ComputesTarget).WithFatal() @@ -460,7 +460,7 @@ func (r *NnfSystemStorageReconciler) createComputes(ctx context.Context, nnfSyst } // Create a NnfStorage resource using the list of Rabbits in the Servers resource -func (r *NnfSystemStorageReconciler) createNnfStorage(ctx context.Context, nnfSystemStorage *nnfv1alpha3.NnfSystemStorage) error { +func (r *NnfSystemStorageReconciler) createNnfStorage(ctx context.Context, nnfSystemStorage *nnfv1alpha4.NnfSystemStorage) error { log := r.Log.WithValues("NnfSystemStorage", client.ObjectKeyFromObject(nnfSystemStorage)) storageProfile, err := r.getStorageProfile(ctx, nnfSystemStorage) @@ -479,7 +479,7 @@ func (r *NnfSystemStorageReconciler) createNnfStorage(ctx context.Context, nnfSy return dwsv1alpha2.NewResourceError("could not get Servers: %v", client.ObjectKeyFromObject(servers)).WithError(err) } - nnfStorage := &nnfv1alpha3.NnfStorage{ + nnfStorage := &nnfv1alpha4.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: nnfSystemStorage.GetName(), Namespace: nnfSystemStorage.GetNamespace(), @@ -497,11 +497,11 @@ func (r *NnfSystemStorageReconciler) createNnfStorage(ctx context.Context, nnfSy nnfStorage.Spec.GroupID = 0 // Need to remove all of the AllocationSets in the NnfStorage object before we begin - nnfStorage.Spec.AllocationSets = []nnfv1alpha3.NnfStorageAllocationSetSpec{} + nnfStorage.Spec.AllocationSets = []nnfv1alpha4.NnfStorageAllocationSetSpec{} // Iterate the Servers data elements to pull out the allocation sets for the server for i := range servers.Spec.AllocationSets { - nnfAllocationSet := nnfv1alpha3.NnfStorageAllocationSetSpec{} + nnfAllocationSet := nnfv1alpha4.NnfStorageAllocationSetSpec{} nnfAllocationSet.Name = servers.Spec.AllocationSets[i].Label nnfAllocationSet.Capacity = servers.Spec.AllocationSets[i].AllocationSize @@ -509,7 +509,7 @@ func (r *NnfSystemStorageReconciler) createNnfStorage(ctx 
context.Context, nnfSy // Create Nodes for this allocation set. for _, storage := range servers.Spec.AllocationSets[i].Storage { - node := nnfv1alpha3.NnfStorageAllocationNodes{Name: storage.Name, Count: storage.AllocationCount} + node := nnfv1alpha4.NnfStorageAllocationNodes{Name: storage.Name, Count: storage.AllocationCount} nnfAllocationSet.Nodes = append(nnfAllocationSet.Nodes, node) } @@ -535,9 +535,9 @@ func (r *NnfSystemStorageReconciler) createNnfStorage(ctx context.Context, nnfSy } // Wait until the NnfStorage has completed. Any errors will bubble up to the NnfSystemStorage -func (r *NnfSystemStorageReconciler) waitForNnfStorage(ctx context.Context, nnfSystemStorage *nnfv1alpha3.NnfSystemStorage) (bool, error) { +func (r *NnfSystemStorageReconciler) waitForNnfStorage(ctx context.Context, nnfSystemStorage *nnfv1alpha4.NnfSystemStorage) (bool, error) { // Check whether the NnfStorage has finished - nnfStorage := &nnfv1alpha3.NnfStorage{ + nnfStorage := &nnfv1alpha4.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: nnfSystemStorage.GetName(), Namespace: nnfSystemStorage.GetNamespace(), @@ -566,7 +566,7 @@ func (r *NnfSystemStorageReconciler) waitForNnfStorage(ctx context.Context, nnfS // Create an NnfAccess using the Computes resource we created earlier. This NnfAccess may or may not create any ClientMount // resources depending on if MakeClientMounts was specified in the NnfSystemStorage spec. The NnfAccess target is "shared", // meaning that multiple compute nodes will access the same storage. 
-func (r *NnfSystemStorageReconciler) createNnfAccess(ctx context.Context, nnfSystemStorage *nnfv1alpha3.NnfSystemStorage) error { +func (r *NnfSystemStorageReconciler) createNnfAccess(ctx context.Context, nnfSystemStorage *nnfv1alpha4.NnfSystemStorage) error { log := r.Log.WithValues("NnfSystemStorage", client.ObjectKeyFromObject(nnfSystemStorage)) storageProfile, err := r.getStorageProfile(ctx, nnfSystemStorage) @@ -574,7 +574,7 @@ func (r *NnfSystemStorageReconciler) createNnfAccess(ctx context.Context, nnfSys return err } - nnfAccess := &nnfv1alpha3.NnfAccess{ + nnfAccess := &nnfv1alpha4.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: nnfSystemStorage.GetName(), Namespace: nnfSystemStorage.GetNamespace(), @@ -604,7 +604,7 @@ func (r *NnfSystemStorageReconciler) createNnfAccess(ctx context.Context, nnfSys nnfAccess.Spec.StorageReference = corev1.ObjectReference{ Name: nnfSystemStorage.GetName(), Namespace: nnfSystemStorage.GetNamespace(), - Kind: reflect.TypeOf(nnfv1alpha3.NnfStorage{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha4.NnfStorage{}).Name(), } return ctrl.SetControllerReference(nnfSystemStorage, nnfAccess, r.Scheme) @@ -625,8 +625,8 @@ func (r *NnfSystemStorageReconciler) createNnfAccess(ctx context.Context, nnfSys } // Wait for the NnfAccess to be ready. 
Any errors are bubbled up to the NnfSystemStorage -func (r *NnfSystemStorageReconciler) waitForNnfAccess(ctx context.Context, nnfSystemStorage *nnfv1alpha3.NnfSystemStorage) (bool, error) { - nnfAccess := &nnfv1alpha3.NnfAccess{ +func (r *NnfSystemStorageReconciler) waitForNnfAccess(ctx context.Context, nnfSystemStorage *nnfv1alpha4.NnfSystemStorage) (bool, error) { + nnfAccess := &nnfv1alpha4.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: nnfSystemStorage.GetName(), Namespace: nnfSystemStorage.GetNamespace(), @@ -659,7 +659,7 @@ func (r *NnfSystemStorageReconciler) NnfSystemStorageEnqueueAll(ctx context.Cont requests := []reconcile.Request{} // Find all the NnfSystemStorage resources and add them to the Request list - nnfSystemStorageList := &nnfv1alpha3.NnfSystemStorageList{} + nnfSystemStorageList := &nnfv1alpha4.NnfSystemStorageList{} if err := r.List(context.TODO(), nnfSystemStorageList, []client.ListOption{}...); err != nil { log.Info("Could not list NnfSystemStorage", "error", err) return requests @@ -675,18 +675,18 @@ func (r *NnfSystemStorageReconciler) NnfSystemStorageEnqueueAll(ctx context.Cont // SetupWithManager sets up the controller with the Manager. func (r *NnfSystemStorageReconciler) SetupWithManager(mgr ctrl.Manager) error { r.ChildObjects = []dwsv1alpha2.ObjectList{ - &nnfv1alpha3.NnfAccessList{}, - &nnfv1alpha3.NnfStorageList{}, + &nnfv1alpha4.NnfAccessList{}, + &nnfv1alpha4.NnfStorageList{}, &dwsv1alpha2.ComputesList{}, &dwsv1alpha2.ServersList{}, } return ctrl.NewControllerManagedBy(mgr). - For(&nnfv1alpha3.NnfSystemStorage{}). + For(&nnfv1alpha4.NnfSystemStorage{}). Owns(&dwsv1alpha2.Computes{}). Owns(&dwsv1alpha2.Servers{}). - Owns(&nnfv1alpha3.NnfStorage{}). - Owns(&nnfv1alpha3.NnfAccess{}). + Owns(&nnfv1alpha4.NnfStorage{}). + Owns(&nnfv1alpha4.NnfAccess{}). Watches(&dwsv1alpha2.Storage{}, handler.EnqueueRequestsFromMapFunc(r.NnfSystemStorageEnqueueAll)). 
Complete(r) } diff --git a/internal/controller/nnfsystemstorage_controller_test.go b/internal/controller/nnfsystemstorage_controller_test.go index df370eb5..65a663a1 100644 --- a/internal/controller/nnfsystemstorage_controller_test.go +++ b/internal/controller/nnfsystemstorage_controller_test.go @@ -33,7 +33,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" ) var _ = Describe("NnfSystemStorage Controller Test", func() { @@ -42,11 +42,11 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { "rabbit-systemstorage-node-1", "rabbit-systemstorage-node-2"} - nnfNodes := [2]*nnfv1alpha3.NnfNode{} + nnfNodes := [2]*nnfv1alpha4.NnfNode{} nodes := [2]*corev1.Node{} var systemConfiguration *dwsv1alpha2.SystemConfiguration - var storageProfile *nnfv1alpha3.NnfStorageProfile + var storageProfile *nnfv1alpha4.NnfStorageProfile var setup sync.Once BeforeEach(func() { @@ -216,7 +216,7 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { ObjectMeta: metav1.ObjectMeta{ Name: nodeName, Labels: map[string]string{ - nnfv1alpha3.RabbitNodeSelectorLabel: "true", + nnfv1alpha4.RabbitNodeSelectorLabel: "true", }, }, Status: corev1.NodeStatus{ @@ -231,14 +231,14 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { Expect(k8sClient.Create(context.TODO(), nodes[i])).To(Succeed()) - nnfNodes[i] = &nnfv1alpha3.NnfNode{ + nnfNodes[i] = &nnfv1alpha4.NnfNode{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ Name: "nnf-nlc", Namespace: nodeName, }, - Spec: nnfv1alpha3.NnfNodeSpec{ - State: nnfv1alpha3.ResourceEnable, + Spec: nnfv1alpha4.NnfNodeSpec{ + State: nnfv1alpha4.ResourceEnable, }, } Expect(k8sClient.Create(context.TODO(), nnfNodes[i])).To(Succeed()) @@ -267,14 +267,14 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { AfterEach(func() { 
Expect(k8sClient.Delete(context.TODO(), storageProfile)).To(Succeed()) - profExpected := &nnfv1alpha3.NnfStorageProfile{} + profExpected := &nnfv1alpha4.NnfStorageProfile{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storageProfile), profExpected) }).ShouldNot(Succeed()) for i := range nodeNames { Expect(k8sClient.Delete(context.TODO(), nnfNodes[i])).To(Succeed()) - tempNnfNode := &nnfv1alpha3.NnfNode{} + tempNnfNode := &nnfv1alpha4.NnfNode{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(nnfNodes[i]), tempNnfNode) }).ShouldNot(Succeed()) @@ -295,20 +295,20 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { Describe("Create NnfSystemStorage", func() { It("Creates basic system storage", func() { - nnfSystemStorage := &nnfv1alpha3.NnfSystemStorage{ + nnfSystemStorage := &nnfv1alpha4.NnfSystemStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "nnf-system-storage", Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha3.NnfSystemStorageSpec{ + Spec: nnfv1alpha4.NnfSystemStorageSpec{ Type: "raw", - ComputesTarget: nnfv1alpha3.ComputesTargetAll, + ComputesTarget: nnfv1alpha4.ComputesTargetAll, MakeClientMounts: false, Capacity: 1073741824, StorageProfile: corev1.ObjectReference{ Name: storageProfile.GetName(), Namespace: storageProfile.GetNamespace(), - Kind: reflect.TypeOf(nnfv1alpha3.NnfStorageProfile{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha4.NnfStorageProfile{}).Name(), }, }, } @@ -354,20 +354,20 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { }) It("Creates even system storage", func() { - nnfSystemStorage := &nnfv1alpha3.NnfSystemStorage{ + nnfSystemStorage := &nnfv1alpha4.NnfSystemStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "nnf-system-storage", Namespace: 
corev1.NamespaceDefault, }, - Spec: nnfv1alpha3.NnfSystemStorageSpec{ + Spec: nnfv1alpha4.NnfSystemStorageSpec{ Type: "raw", - ComputesTarget: nnfv1alpha3.ComputesTargetEven, + ComputesTarget: nnfv1alpha4.ComputesTargetEven, MakeClientMounts: false, Capacity: 1073741824, StorageProfile: corev1.ObjectReference{ Name: storageProfile.GetName(), Namespace: storageProfile.GetNamespace(), - Kind: reflect.TypeOf(nnfv1alpha3.NnfStorageProfile{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha4.NnfStorageProfile{}).Name(), }, }, } @@ -413,21 +413,21 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { }) It("Creates system storage with index map", func() { - nnfSystemStorage := &nnfv1alpha3.NnfSystemStorage{ + nnfSystemStorage := &nnfv1alpha4.NnfSystemStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "nnf-system-storage", Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha3.NnfSystemStorageSpec{ + Spec: nnfv1alpha4.NnfSystemStorageSpec{ Type: "raw", - ComputesTarget: nnfv1alpha3.ComputesTargetPattern, + ComputesTarget: nnfv1alpha4.ComputesTargetPattern, ComputesPattern: []int{0, 1, 2, 3, 4}, MakeClientMounts: false, Capacity: 1073741824, StorageProfile: corev1.ObjectReference{ Name: storageProfile.GetName(), Namespace: storageProfile.GetNamespace(), - Kind: reflect.TypeOf(nnfv1alpha3.NnfStorageProfile{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha4.NnfStorageProfile{}).Name(), }, }, } @@ -473,14 +473,14 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { }) It("Creates system storage with excluded Rabbits and computes", func() { - nnfSystemStorage := &nnfv1alpha3.NnfSystemStorage{ + nnfSystemStorage := &nnfv1alpha4.NnfSystemStorage{ ObjectMeta: metav1.ObjectMeta{ Name: "nnf-system-storage", Namespace: corev1.NamespaceDefault, }, - Spec: nnfv1alpha3.NnfSystemStorageSpec{ + Spec: nnfv1alpha4.NnfSystemStorageSpec{ Type: "raw", - ComputesTarget: nnfv1alpha3.ComputesTargetAll, + ComputesTarget: nnfv1alpha4.ComputesTargetAll, ExcludeRabbits: 
[]string{nodeNames[0]}, ExcludeComputes: []string{"1-4", "1-5", "1-6"}, MakeClientMounts: false, @@ -488,7 +488,7 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { StorageProfile: corev1.ObjectReference{ Name: storageProfile.GetName(), Namespace: storageProfile.GetNamespace(), - Kind: reflect.TypeOf(nnfv1alpha3.NnfStorageProfile{}).Name(), + Kind: reflect.TypeOf(nnfv1alpha4.NnfStorageProfile{}).Name(), }, }, } diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 3fd884ab..5fe01a19 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -53,6 +53,7 @@ import ( nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" _ "github.com/DataWorkflowServices/dws/config/crd/bases" _ "github.com/DataWorkflowServices/dws/config/webhook" @@ -151,6 +152,9 @@ var _ = BeforeSuite(func() { err = nnfv1alpha3.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) + err = nnfv1alpha4.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + //+kubebuilder:scaffold:scheme webhookPaths := []string{} @@ -215,46 +219,46 @@ var _ = BeforeSuite(func() { err = (&lusv1beta1.LustreFileSystem{}).SetupWebhookWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) - err = (&nnfv1alpha3.NnfStorageProfile{}).SetupWebhookWithManager(k8sManager) + err = (&nnfv1alpha4.NnfStorageProfile{}).SetupWebhookWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) - err = (&nnfv1alpha3.NnfContainerProfile{}).SetupWebhookWithManager(k8sManager) + err = (&nnfv1alpha4.NnfContainerProfile{}).SetupWebhookWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) - err = (&nnfv1alpha3.NnfDataMovementProfile{}).SetupWebhookWithManager(k8sManager) + err = (&nnfv1alpha4.NnfDataMovementProfile{}).SetupWebhookWithManager(k8sManager) 
Expect(err).ToNot(HaveOccurred()) - err = (&nnfv1alpha3.NnfAccess{}).SetupWebhookWithManager(k8sManager) + err = (&nnfv1alpha4.NnfAccess{}).SetupWebhookWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) - err = (&nnfv1alpha3.NnfDataMovement{}).SetupWebhookWithManager(k8sManager) + err = (&nnfv1alpha4.NnfDataMovement{}).SetupWebhookWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) - err = (&nnfv1alpha3.NnfDataMovementManager{}).SetupWebhookWithManager(k8sManager) + err = (&nnfv1alpha4.NnfDataMovementManager{}).SetupWebhookWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) - err = (&nnfv1alpha3.NnfLustreMGT{}).SetupWebhookWithManager(k8sManager) + err = (&nnfv1alpha4.NnfLustreMGT{}).SetupWebhookWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) - err = (&nnfv1alpha3.NnfNode{}).SetupWebhookWithManager(k8sManager) + err = (&nnfv1alpha4.NnfNode{}).SetupWebhookWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) - err = (&nnfv1alpha3.NnfNodeBlockStorage{}).SetupWebhookWithManager(k8sManager) + err = (&nnfv1alpha4.NnfNodeBlockStorage{}).SetupWebhookWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) - err = (&nnfv1alpha3.NnfNodeECData{}).SetupWebhookWithManager(k8sManager) + err = (&nnfv1alpha4.NnfNodeECData{}).SetupWebhookWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) - err = (&nnfv1alpha3.NnfNodeStorage{}).SetupWebhookWithManager(k8sManager) + err = (&nnfv1alpha4.NnfNodeStorage{}).SetupWebhookWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) - err = (&nnfv1alpha3.NnfPortManager{}).SetupWebhookWithManager(k8sManager) + err = (&nnfv1alpha4.NnfPortManager{}).SetupWebhookWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) - err = (&nnfv1alpha3.NnfStorage{}).SetupWebhookWithManager(k8sManager) + err = (&nnfv1alpha4.NnfStorage{}).SetupWebhookWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) - err = (&nnfv1alpha3.NnfSystemStorage{}).SetupWebhookWithManager(k8sManager) + err = 
(&nnfv1alpha4.NnfSystemStorage{}).SetupWebhookWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) // +crdbumper:scaffold:builder diff --git a/mount-daemon/main.go b/mount-daemon/main.go index 9d383b2f..3ea05df2 100644 --- a/mount-daemon/main.go +++ b/mount-daemon/main.go @@ -46,7 +46,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" controllers "github.com/NearNodeFlash/nnf-sos/internal/controller" "github.com/NearNodeFlash/nnf-sos/mount-daemon/version" //+kubebuilder:scaffold:imports @@ -69,7 +69,7 @@ type Service struct { func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(dwsv1alpha2.AddToScheme(scheme)) - utilruntime.Must(nnfv1alpha3.AddToScheme(scheme)) + utilruntime.Must(nnfv1alpha4.AddToScheme(scheme)) //+kubebuilder:scaffold:scheme } From 85d4c502a72af3a01ebe0cc196f8709a814129de Mon Sep 17 00:00:00 2001 From: Blake Devcich Date: Wed, 13 Nov 2024 15:45:24 -0600 Subject: [PATCH 07/23] CRDBUMPER-bump-apis Point earlier spoke APIs at new hub v1alpha4. The conversion_test.go and the ConvertTo()/ConvertFrom() routines in conversion.go are still valid for the new hub because it is currently identical to the previous hub. Update the k8s:conversion-gen marker in doc.go to point to the new hub. ACTION: Some API libraries may have been referencing one of these non-local APIs. 
Verify that these APIs are being referenced by their correct versions: DirectiveBreakdown, Workflow Signed-off-by: Blake Devcich --- api/v1alpha1/conversion.go | 154 ++++++++++++++++---------------- api/v1alpha1/conversion_test.go | 30 +++---- api/v1alpha1/doc.go | 2 +- api/v1alpha2/conversion.go | 150 +++++++++++++++---------------- api/v1alpha2/conversion_test.go | 30 +++---- api/v1alpha2/doc.go | 2 +- 6 files changed, 184 insertions(+), 184 deletions(-) diff --git a/api/v1alpha1/conversion.go b/api/v1alpha1/conversion.go index 8afba1a4..fd10b5f4 100644 --- a/api/v1alpha1/conversion.go +++ b/api/v1alpha1/conversion.go @@ -26,7 +26,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/conversion" logf "sigs.k8s.io/controller-runtime/pkg/log" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" utilconversion "github.com/NearNodeFlash/nnf-sos/github/cluster-api/util/conversion" ) @@ -34,14 +34,14 @@ var convertlog = logf.Log.V(2).WithName("convert-v1alpha1") func (src *NnfAccess) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfAccess To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfAccess) + dst := dstRaw.(*nnfv1alpha4.NnfAccess) - if err := Convert_v1alpha1_NnfAccess_To_v1alpha3_NnfAccess(src, dst, nil); err != nil { + if err := Convert_v1alpha1_NnfAccess_To_v1alpha4_NnfAccess(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfAccess{} + restored := &nnfv1alpha4.NnfAccess{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -53,10 +53,10 @@ func (src *NnfAccess) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfAccess) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfAccess) + src := srcRaw.(*nnfv1alpha4.NnfAccess) convertlog.Info("Convert NnfAccess From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfAccess_To_v1alpha1_NnfAccess(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfAccess_To_v1alpha1_NnfAccess(src, dst, nil); err != nil { return err } @@ -66,14 +66,14 @@ func (dst *NnfAccess) ConvertFrom(srcRaw conversion.Hub) error { func (src *NnfContainerProfile) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfContainerProfile To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfContainerProfile) + dst := dstRaw.(*nnfv1alpha4.NnfContainerProfile) - if err := Convert_v1alpha1_NnfContainerProfile_To_v1alpha3_NnfContainerProfile(src, dst, nil); err != nil { + if err := Convert_v1alpha1_NnfContainerProfile_To_v1alpha4_NnfContainerProfile(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfContainerProfile{} + restored := &nnfv1alpha4.NnfContainerProfile{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -85,10 +85,10 @@ func (src *NnfContainerProfile) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfContainerProfile) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfContainerProfile) + src := srcRaw.(*nnfv1alpha4.NnfContainerProfile) convertlog.Info("Convert NnfContainerProfile From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfContainerProfile_To_v1alpha1_NnfContainerProfile(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfContainerProfile_To_v1alpha1_NnfContainerProfile(src, dst, nil); err != nil { return err } @@ -98,14 +98,14 @@ func (dst *NnfContainerProfile) ConvertFrom(srcRaw conversion.Hub) error { func (src *NnfDataMovement) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfDataMovement To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfDataMovement) + dst := dstRaw.(*nnfv1alpha4.NnfDataMovement) - if err := Convert_v1alpha1_NnfDataMovement_To_v1alpha3_NnfDataMovement(src, dst, nil); err != nil { + if err := Convert_v1alpha1_NnfDataMovement_To_v1alpha4_NnfDataMovement(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfDataMovement{} + restored := &nnfv1alpha4.NnfDataMovement{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -117,10 +117,10 @@ func (src *NnfDataMovement) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfDataMovement) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfDataMovement) + src := srcRaw.(*nnfv1alpha4.NnfDataMovement) convertlog.Info("Convert NnfDataMovement From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfDataMovement_To_v1alpha1_NnfDataMovement(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfDataMovement_To_v1alpha1_NnfDataMovement(src, dst, nil); err != nil { return err } @@ -130,14 +130,14 @@ func (dst *NnfDataMovement) ConvertFrom(srcRaw conversion.Hub) error { func (src *NnfDataMovementManager) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfDataMovementManager To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfDataMovementManager) + dst := dstRaw.(*nnfv1alpha4.NnfDataMovementManager) - if err := Convert_v1alpha1_NnfDataMovementManager_To_v1alpha3_NnfDataMovementManager(src, dst, nil); err != nil { + if err := Convert_v1alpha1_NnfDataMovementManager_To_v1alpha4_NnfDataMovementManager(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfDataMovementManager{} + restored := &nnfv1alpha4.NnfDataMovementManager{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -149,10 +149,10 @@ func (src *NnfDataMovementManager) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfDataMovementManager) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfDataMovementManager) + src := srcRaw.(*nnfv1alpha4.NnfDataMovementManager) convertlog.Info("Convert NnfDataMovementManager From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfDataMovementManager_To_v1alpha1_NnfDataMovementManager(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfDataMovementManager_To_v1alpha1_NnfDataMovementManager(src, dst, nil); err != nil { return err } @@ -162,14 +162,14 @@ func (dst *NnfDataMovementManager) ConvertFrom(srcRaw conversion.Hub) error { func (src *NnfDataMovementProfile) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfDataMovementProfile To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfDataMovementProfile) + dst := dstRaw.(*nnfv1alpha4.NnfDataMovementProfile) - if err := Convert_v1alpha1_NnfDataMovementProfile_To_v1alpha3_NnfDataMovementProfile(src, dst, nil); err != nil { + if err := Convert_v1alpha1_NnfDataMovementProfile_To_v1alpha4_NnfDataMovementProfile(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfDataMovementProfile{} + restored := &nnfv1alpha4.NnfDataMovementProfile{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -181,10 +181,10 @@ func (src *NnfDataMovementProfile) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfDataMovementProfile) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfDataMovementProfile) + src := srcRaw.(*nnfv1alpha4.NnfDataMovementProfile) convertlog.Info("Convert NnfDataMovementProfile From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfDataMovementProfile_To_v1alpha1_NnfDataMovementProfile(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfDataMovementProfile_To_v1alpha1_NnfDataMovementProfile(src, dst, nil); err != nil { return err } @@ -194,14 +194,14 @@ func (dst *NnfDataMovementProfile) ConvertFrom(srcRaw conversion.Hub) error { func (src *NnfLustreMGT) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfLustreMGT To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfLustreMGT) + dst := dstRaw.(*nnfv1alpha4.NnfLustreMGT) - if err := Convert_v1alpha1_NnfLustreMGT_To_v1alpha3_NnfLustreMGT(src, dst, nil); err != nil { + if err := Convert_v1alpha1_NnfLustreMGT_To_v1alpha4_NnfLustreMGT(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfLustreMGT{} + restored := &nnfv1alpha4.NnfLustreMGT{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -213,10 +213,10 @@ func (src *NnfLustreMGT) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfLustreMGT) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfLustreMGT) + src := srcRaw.(*nnfv1alpha4.NnfLustreMGT) convertlog.Info("Convert NnfLustreMGT From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfLustreMGT_To_v1alpha1_NnfLustreMGT(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfLustreMGT_To_v1alpha1_NnfLustreMGT(src, dst, nil); err != nil { return err } @@ -226,14 +226,14 @@ func (dst *NnfLustreMGT) ConvertFrom(srcRaw conversion.Hub) error { func (src *NnfNode) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfNode To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfNode) + dst := dstRaw.(*nnfv1alpha4.NnfNode) - if err := Convert_v1alpha1_NnfNode_To_v1alpha3_NnfNode(src, dst, nil); err != nil { + if err := Convert_v1alpha1_NnfNode_To_v1alpha4_NnfNode(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfNode{} + restored := &nnfv1alpha4.NnfNode{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -245,10 +245,10 @@ func (src *NnfNode) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfNode) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfNode) + src := srcRaw.(*nnfv1alpha4.NnfNode) convertlog.Info("Convert NnfNode From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfNode_To_v1alpha1_NnfNode(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfNode_To_v1alpha1_NnfNode(src, dst, nil); err != nil { return err } @@ -258,14 +258,14 @@ func (dst *NnfNode) ConvertFrom(srcRaw conversion.Hub) error { func (src *NnfNodeBlockStorage) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfNodeBlockStorage To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfNodeBlockStorage) + dst := dstRaw.(*nnfv1alpha4.NnfNodeBlockStorage) - if err := Convert_v1alpha1_NnfNodeBlockStorage_To_v1alpha3_NnfNodeBlockStorage(src, dst, nil); err != nil { + if err := Convert_v1alpha1_NnfNodeBlockStorage_To_v1alpha4_NnfNodeBlockStorage(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfNodeBlockStorage{} + restored := &nnfv1alpha4.NnfNodeBlockStorage{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -277,10 +277,10 @@ func (src *NnfNodeBlockStorage) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfNodeBlockStorage) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfNodeBlockStorage) + src := srcRaw.(*nnfv1alpha4.NnfNodeBlockStorage) convertlog.Info("Convert NnfNodeBlockStorage From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfNodeBlockStorage_To_v1alpha1_NnfNodeBlockStorage(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfNodeBlockStorage_To_v1alpha1_NnfNodeBlockStorage(src, dst, nil); err != nil { return err } @@ -290,14 +290,14 @@ func (dst *NnfNodeBlockStorage) ConvertFrom(srcRaw conversion.Hub) error { func (src *NnfNodeECData) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfNodeECData To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfNodeECData) + dst := dstRaw.(*nnfv1alpha4.NnfNodeECData) - if err := Convert_v1alpha1_NnfNodeECData_To_v1alpha3_NnfNodeECData(src, dst, nil); err != nil { + if err := Convert_v1alpha1_NnfNodeECData_To_v1alpha4_NnfNodeECData(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfNodeECData{} + restored := &nnfv1alpha4.NnfNodeECData{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -309,10 +309,10 @@ func (src *NnfNodeECData) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfNodeECData) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfNodeECData) + src := srcRaw.(*nnfv1alpha4.NnfNodeECData) convertlog.Info("Convert NnfNodeECData From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfNodeECData_To_v1alpha1_NnfNodeECData(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfNodeECData_To_v1alpha1_NnfNodeECData(src, dst, nil); err != nil { return err } @@ -322,14 +322,14 @@ func (dst *NnfNodeECData) ConvertFrom(srcRaw conversion.Hub) error { func (src *NnfNodeStorage) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfNodeStorage To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfNodeStorage) + dst := dstRaw.(*nnfv1alpha4.NnfNodeStorage) - if err := Convert_v1alpha1_NnfNodeStorage_To_v1alpha3_NnfNodeStorage(src, dst, nil); err != nil { + if err := Convert_v1alpha1_NnfNodeStorage_To_v1alpha4_NnfNodeStorage(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfNodeStorage{} + restored := &nnfv1alpha4.NnfNodeStorage{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -341,10 +341,10 @@ func (src *NnfNodeStorage) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfNodeStorage) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfNodeStorage) + src := srcRaw.(*nnfv1alpha4.NnfNodeStorage) convertlog.Info("Convert NnfNodeStorage From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfNodeStorage_To_v1alpha1_NnfNodeStorage(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfNodeStorage_To_v1alpha1_NnfNodeStorage(src, dst, nil); err != nil { return err } @@ -354,14 +354,14 @@ func (dst *NnfNodeStorage) ConvertFrom(srcRaw conversion.Hub) error { func (src *NnfPortManager) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfPortManager To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfPortManager) + dst := dstRaw.(*nnfv1alpha4.NnfPortManager) - if err := Convert_v1alpha1_NnfPortManager_To_v1alpha3_NnfPortManager(src, dst, nil); err != nil { + if err := Convert_v1alpha1_NnfPortManager_To_v1alpha4_NnfPortManager(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfPortManager{} + restored := &nnfv1alpha4.NnfPortManager{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -373,10 +373,10 @@ func (src *NnfPortManager) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfPortManager) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfPortManager) + src := srcRaw.(*nnfv1alpha4.NnfPortManager) convertlog.Info("Convert NnfPortManager From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfPortManager_To_v1alpha1_NnfPortManager(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfPortManager_To_v1alpha1_NnfPortManager(src, dst, nil); err != nil { return err } @@ -386,14 +386,14 @@ func (dst *NnfPortManager) ConvertFrom(srcRaw conversion.Hub) error { func (src *NnfStorage) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfStorage To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfStorage) + dst := dstRaw.(*nnfv1alpha4.NnfStorage) - if err := Convert_v1alpha1_NnfStorage_To_v1alpha3_NnfStorage(src, dst, nil); err != nil { + if err := Convert_v1alpha1_NnfStorage_To_v1alpha4_NnfStorage(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfStorage{} + restored := &nnfv1alpha4.NnfStorage{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -405,10 +405,10 @@ func (src *NnfStorage) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfStorage) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfStorage) + src := srcRaw.(*nnfv1alpha4.NnfStorage) convertlog.Info("Convert NnfStorage From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfStorage_To_v1alpha1_NnfStorage(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfStorage_To_v1alpha1_NnfStorage(src, dst, nil); err != nil { return err } @@ -418,14 +418,14 @@ func (dst *NnfStorage) ConvertFrom(srcRaw conversion.Hub) error { func (src *NnfStorageProfile) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfStorageProfile To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfStorageProfile) + dst := dstRaw.(*nnfv1alpha4.NnfStorageProfile) - if err := Convert_v1alpha1_NnfStorageProfile_To_v1alpha3_NnfStorageProfile(src, dst, nil); err != nil { + if err := Convert_v1alpha1_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfStorageProfile{} + restored := &nnfv1alpha4.NnfStorageProfile{} hasAnno, err := utilconversion.UnmarshalData(src, restored) if err != nil { return err @@ -455,10 +455,10 @@ func (src *NnfStorageProfile) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfStorageProfile) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfStorageProfile) + src := srcRaw.(*nnfv1alpha4.NnfStorageProfile) convertlog.Info("Convert NnfStorageProfile From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfStorageProfile_To_v1alpha1_NnfStorageProfile(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfStorageProfile_To_v1alpha1_NnfStorageProfile(src, dst, nil); err != nil { return err } @@ -468,14 +468,14 @@ func (dst *NnfStorageProfile) ConvertFrom(srcRaw conversion.Hub) error { func (src *NnfSystemStorage) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfSystemStorage To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfSystemStorage) + dst := dstRaw.(*nnfv1alpha4.NnfSystemStorage) - if err := Convert_v1alpha1_NnfSystemStorage_To_v1alpha3_NnfSystemStorage(src, dst, nil); err != nil { + if err := Convert_v1alpha1_NnfSystemStorage_To_v1alpha4_NnfSystemStorage(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfSystemStorage{} + restored := &nnfv1alpha4.NnfSystemStorage{} hasAnno, err := utilconversion.UnmarshalData(src, restored) if err != nil { return err @@ -494,10 +494,10 @@ func (src *NnfSystemStorage) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfSystemStorage) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfSystemStorage) + src := srcRaw.(*nnfv1alpha4.NnfSystemStorage) convertlog.Info("Convert NnfSystemStorage From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfSystemStorage_To_v1alpha1_NnfSystemStorage(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfSystemStorage_To_v1alpha1_NnfSystemStorage(src, dst, nil); err != nil { return err } @@ -628,14 +628,14 @@ func (dst *NnfSystemStorageList) ConvertFrom(srcRaw conversion.Hub) error { // The conversion-gen tool dropped these from zz_generated.conversion.go to // force us to acknowledge that we are addressing the conversion requirements. 
-func Convert_v1alpha3_NnfSystemStorageSpec_To_v1alpha1_NnfSystemStorageSpec(in *nnfv1alpha3.NnfSystemStorageSpec, out *NnfSystemStorageSpec, s apiconversion.Scope) error { - return autoConvert_v1alpha3_NnfSystemStorageSpec_To_v1alpha1_NnfSystemStorageSpec(in, out, s) +func Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha1_NnfSystemStorageSpec(in *nnfv1alpha4.NnfSystemStorageSpec, out *NnfSystemStorageSpec, s apiconversion.Scope) error { + return autoConvert_v1alpha4_NnfSystemStorageSpec_To_v1alpha1_NnfSystemStorageSpec(in, out, s) } -func Convert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(in *nnfv1alpha3.NnfStorageProfileCmdLines, out *NnfStorageProfileCmdLines, s apiconversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(in, out, s) +func Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(in *nnfv1alpha4.NnfStorageProfileCmdLines, out *NnfStorageProfileCmdLines, s apiconversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(in, out, s) } -func Convert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(in *nnfv1alpha3.NnfStorageProfileLustreCmdLines, out *NnfStorageProfileLustreCmdLines, s apiconversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(in, out, s) +func Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(in *nnfv1alpha4.NnfStorageProfileLustreCmdLines, out *NnfStorageProfileLustreCmdLines, s apiconversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(in, out, s) } diff --git a/api/v1alpha1/conversion_test.go b/api/v1alpha1/conversion_test.go index e5ccf189..bfc85ae7 100644 --- a/api/v1alpha1/conversion_test.go +++ b/api/v1alpha1/conversion_test.go 
@@ -24,79 +24,79 @@ import ( . "github.com/onsi/ginkgo/v2" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" utilconversion "github.com/NearNodeFlash/nnf-sos/github/cluster-api/util/conversion" ) func TestFuzzyConversion(t *testing.T) { t.Run("for NnfAccess", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfAccess{}, + Hub: &nnfv1alpha4.NnfAccess{}, Spoke: &NnfAccess{}, })) t.Run("for NnfContainerProfile", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfContainerProfile{}, + Hub: &nnfv1alpha4.NnfContainerProfile{}, Spoke: &NnfContainerProfile{}, })) t.Run("for NnfDataMovement", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfDataMovement{}, + Hub: &nnfv1alpha4.NnfDataMovement{}, Spoke: &NnfDataMovement{}, })) t.Run("for NnfDataMovementManager", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfDataMovementManager{}, + Hub: &nnfv1alpha4.NnfDataMovementManager{}, Spoke: &NnfDataMovementManager{}, })) t.Run("for NnfDataMovementProfile", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfDataMovementProfile{}, + Hub: &nnfv1alpha4.NnfDataMovementProfile{}, Spoke: &NnfDataMovementProfile{}, })) t.Run("for NnfLustreMGT", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfLustreMGT{}, + Hub: &nnfv1alpha4.NnfLustreMGT{}, Spoke: &NnfLustreMGT{}, })) t.Run("for NnfNode", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfNode{}, + Hub: &nnfv1alpha4.NnfNode{}, Spoke: &NnfNode{}, })) t.Run("for NnfNodeBlockStorage", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfNodeBlockStorage{}, + Hub: &nnfv1alpha4.NnfNodeBlockStorage{}, Spoke: &NnfNodeBlockStorage{}, })) t.Run("for NnfNodeECData", 
utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfNodeECData{}, + Hub: &nnfv1alpha4.NnfNodeECData{}, Spoke: &NnfNodeECData{}, })) t.Run("for NnfNodeStorage", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfNodeStorage{}, + Hub: &nnfv1alpha4.NnfNodeStorage{}, Spoke: &NnfNodeStorage{}, })) t.Run("for NnfPortManager", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfPortManager{}, + Hub: &nnfv1alpha4.NnfPortManager{}, Spoke: &NnfPortManager{}, })) t.Run("for NnfStorage", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfStorage{}, + Hub: &nnfv1alpha4.NnfStorage{}, Spoke: &NnfStorage{}, })) t.Run("for NnfStorageProfile", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfStorageProfile{}, + Hub: &nnfv1alpha4.NnfStorageProfile{}, Spoke: &NnfStorageProfile{}, })) t.Run("for NnfSystemStorage", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfSystemStorage{}, + Hub: &nnfv1alpha4.NnfSystemStorage{}, Spoke: &NnfSystemStorage{}, })) diff --git a/api/v1alpha1/doc.go b/api/v1alpha1/doc.go index c39a863f..b22c3173 100644 --- a/api/v1alpha1/doc.go +++ b/api/v1alpha1/doc.go @@ -19,5 +19,5 @@ // The following tag tells conversion-gen to generate conversion routines, and // it tells conversion-gen the name of the hub version. 
-// +k8s:conversion-gen=github.com/NearNodeFlash/nnf-sos/api/v1alpha3 +// +k8s:conversion-gen=github.com/NearNodeFlash/nnf-sos/api/v1alpha4 package v1alpha1 diff --git a/api/v1alpha2/conversion.go b/api/v1alpha2/conversion.go index 9b8d409f..c564d223 100644 --- a/api/v1alpha2/conversion.go +++ b/api/v1alpha2/conversion.go @@ -26,7 +26,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/conversion" logf "sigs.k8s.io/controller-runtime/pkg/log" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" utilconversion "github.com/NearNodeFlash/nnf-sos/github/cluster-api/util/conversion" ) @@ -34,14 +34,14 @@ var convertlog = logf.Log.V(2).WithName("convert-v1alpha2") func (src *NnfAccess) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfAccess To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfAccess) + dst := dstRaw.(*nnfv1alpha4.NnfAccess) - if err := Convert_v1alpha2_NnfAccess_To_v1alpha3_NnfAccess(src, dst, nil); err != nil { + if err := Convert_v1alpha2_NnfAccess_To_v1alpha4_NnfAccess(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfAccess{} + restored := &nnfv1alpha4.NnfAccess{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -53,10 +53,10 @@ func (src *NnfAccess) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfAccess) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfAccess) + src := srcRaw.(*nnfv1alpha4.NnfAccess) convertlog.Info("Convert NnfAccess From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfAccess_To_v1alpha2_NnfAccess(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfAccess_To_v1alpha2_NnfAccess(src, dst, nil); err != nil { return err } @@ -66,14 +66,14 @@ func (dst *NnfAccess) ConvertFrom(srcRaw conversion.Hub) error { func (src *NnfContainerProfile) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfContainerProfile To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfContainerProfile) + dst := dstRaw.(*nnfv1alpha4.NnfContainerProfile) - if err := Convert_v1alpha2_NnfContainerProfile_To_v1alpha3_NnfContainerProfile(src, dst, nil); err != nil { + if err := Convert_v1alpha2_NnfContainerProfile_To_v1alpha4_NnfContainerProfile(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfContainerProfile{} + restored := &nnfv1alpha4.NnfContainerProfile{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -85,10 +85,10 @@ func (src *NnfContainerProfile) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfContainerProfile) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfContainerProfile) + src := srcRaw.(*nnfv1alpha4.NnfContainerProfile) convertlog.Info("Convert NnfContainerProfile From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfContainerProfile_To_v1alpha2_NnfContainerProfile(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfContainerProfile_To_v1alpha2_NnfContainerProfile(src, dst, nil); err != nil { return err } @@ -98,14 +98,14 @@ func (dst *NnfContainerProfile) ConvertFrom(srcRaw conversion.Hub) error { func (src *NnfDataMovement) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfDataMovement To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfDataMovement) + dst := dstRaw.(*nnfv1alpha4.NnfDataMovement) - if err := Convert_v1alpha2_NnfDataMovement_To_v1alpha3_NnfDataMovement(src, dst, nil); err != nil { + if err := Convert_v1alpha2_NnfDataMovement_To_v1alpha4_NnfDataMovement(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfDataMovement{} + restored := &nnfv1alpha4.NnfDataMovement{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -117,10 +117,10 @@ func (src *NnfDataMovement) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfDataMovement) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfDataMovement) + src := srcRaw.(*nnfv1alpha4.NnfDataMovement) convertlog.Info("Convert NnfDataMovement From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfDataMovement_To_v1alpha2_NnfDataMovement(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfDataMovement_To_v1alpha2_NnfDataMovement(src, dst, nil); err != nil { return err } @@ -130,14 +130,14 @@ func (dst *NnfDataMovement) ConvertFrom(srcRaw conversion.Hub) error { func (src *NnfDataMovementManager) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfDataMovementManager To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfDataMovementManager) + dst := dstRaw.(*nnfv1alpha4.NnfDataMovementManager) - if err := Convert_v1alpha2_NnfDataMovementManager_To_v1alpha3_NnfDataMovementManager(src, dst, nil); err != nil { + if err := Convert_v1alpha2_NnfDataMovementManager_To_v1alpha4_NnfDataMovementManager(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfDataMovementManager{} + restored := &nnfv1alpha4.NnfDataMovementManager{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -149,10 +149,10 @@ func (src *NnfDataMovementManager) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfDataMovementManager) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfDataMovementManager) + src := srcRaw.(*nnfv1alpha4.NnfDataMovementManager) convertlog.Info("Convert NnfDataMovementManager From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfDataMovementManager_To_v1alpha2_NnfDataMovementManager(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfDataMovementManager_To_v1alpha2_NnfDataMovementManager(src, dst, nil); err != nil { return err } @@ -162,14 +162,14 @@ func (dst *NnfDataMovementManager) ConvertFrom(srcRaw conversion.Hub) error { func (src *NnfDataMovementProfile) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfDataMovementProfile To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfDataMovementProfile) + dst := dstRaw.(*nnfv1alpha4.NnfDataMovementProfile) - if err := Convert_v1alpha2_NnfDataMovementProfile_To_v1alpha3_NnfDataMovementProfile(src, dst, nil); err != nil { + if err := Convert_v1alpha2_NnfDataMovementProfile_To_v1alpha4_NnfDataMovementProfile(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfDataMovementProfile{} + restored := &nnfv1alpha4.NnfDataMovementProfile{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -181,10 +181,10 @@ func (src *NnfDataMovementProfile) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfDataMovementProfile) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfDataMovementProfile) + src := srcRaw.(*nnfv1alpha4.NnfDataMovementProfile) convertlog.Info("Convert NnfDataMovementProfile From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfDataMovementProfile_To_v1alpha2_NnfDataMovementProfile(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfDataMovementProfile_To_v1alpha2_NnfDataMovementProfile(src, dst, nil); err != nil { return err } @@ -194,14 +194,14 @@ func (dst *NnfDataMovementProfile) ConvertFrom(srcRaw conversion.Hub) error { func (src *NnfLustreMGT) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfLustreMGT To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfLustreMGT) + dst := dstRaw.(*nnfv1alpha4.NnfLustreMGT) - if err := Convert_v1alpha2_NnfLustreMGT_To_v1alpha3_NnfLustreMGT(src, dst, nil); err != nil { + if err := Convert_v1alpha2_NnfLustreMGT_To_v1alpha4_NnfLustreMGT(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfLustreMGT{} + restored := &nnfv1alpha4.NnfLustreMGT{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -213,10 +213,10 @@ func (src *NnfLustreMGT) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfLustreMGT) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfLustreMGT) + src := srcRaw.(*nnfv1alpha4.NnfLustreMGT) convertlog.Info("Convert NnfLustreMGT From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfLustreMGT_To_v1alpha2_NnfLustreMGT(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfLustreMGT_To_v1alpha2_NnfLustreMGT(src, dst, nil); err != nil { return err } @@ -226,14 +226,14 @@ func (dst *NnfLustreMGT) ConvertFrom(srcRaw conversion.Hub) error { func (src *NnfNode) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfNode To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfNode) + dst := dstRaw.(*nnfv1alpha4.NnfNode) - if err := Convert_v1alpha2_NnfNode_To_v1alpha3_NnfNode(src, dst, nil); err != nil { + if err := Convert_v1alpha2_NnfNode_To_v1alpha4_NnfNode(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfNode{} + restored := &nnfv1alpha4.NnfNode{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -245,10 +245,10 @@ func (src *NnfNode) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfNode) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfNode) + src := srcRaw.(*nnfv1alpha4.NnfNode) convertlog.Info("Convert NnfNode From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfNode_To_v1alpha2_NnfNode(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfNode_To_v1alpha2_NnfNode(src, dst, nil); err != nil { return err } @@ -258,14 +258,14 @@ func (dst *NnfNode) ConvertFrom(srcRaw conversion.Hub) error { func (src *NnfNodeBlockStorage) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfNodeBlockStorage To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfNodeBlockStorage) + dst := dstRaw.(*nnfv1alpha4.NnfNodeBlockStorage) - if err := Convert_v1alpha2_NnfNodeBlockStorage_To_v1alpha3_NnfNodeBlockStorage(src, dst, nil); err != nil { + if err := Convert_v1alpha2_NnfNodeBlockStorage_To_v1alpha4_NnfNodeBlockStorage(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfNodeBlockStorage{} + restored := &nnfv1alpha4.NnfNodeBlockStorage{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -277,10 +277,10 @@ func (src *NnfNodeBlockStorage) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfNodeBlockStorage) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfNodeBlockStorage) + src := srcRaw.(*nnfv1alpha4.NnfNodeBlockStorage) convertlog.Info("Convert NnfNodeBlockStorage From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfNodeBlockStorage_To_v1alpha2_NnfNodeBlockStorage(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfNodeBlockStorage_To_v1alpha2_NnfNodeBlockStorage(src, dst, nil); err != nil { return err } @@ -290,14 +290,14 @@ func (dst *NnfNodeBlockStorage) ConvertFrom(srcRaw conversion.Hub) error { func (src *NnfNodeECData) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfNodeECData To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfNodeECData) + dst := dstRaw.(*nnfv1alpha4.NnfNodeECData) - if err := Convert_v1alpha2_NnfNodeECData_To_v1alpha3_NnfNodeECData(src, dst, nil); err != nil { + if err := Convert_v1alpha2_NnfNodeECData_To_v1alpha4_NnfNodeECData(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfNodeECData{} + restored := &nnfv1alpha4.NnfNodeECData{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -309,10 +309,10 @@ func (src *NnfNodeECData) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfNodeECData) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfNodeECData) + src := srcRaw.(*nnfv1alpha4.NnfNodeECData) convertlog.Info("Convert NnfNodeECData From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfNodeECData_To_v1alpha2_NnfNodeECData(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfNodeECData_To_v1alpha2_NnfNodeECData(src, dst, nil); err != nil { return err } @@ -322,14 +322,14 @@ func (dst *NnfNodeECData) ConvertFrom(srcRaw conversion.Hub) error { func (src *NnfNodeStorage) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfNodeStorage To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfNodeStorage) + dst := dstRaw.(*nnfv1alpha4.NnfNodeStorage) - if err := Convert_v1alpha2_NnfNodeStorage_To_v1alpha3_NnfNodeStorage(src, dst, nil); err != nil { + if err := Convert_v1alpha2_NnfNodeStorage_To_v1alpha4_NnfNodeStorage(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfNodeStorage{} + restored := &nnfv1alpha4.NnfNodeStorage{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -341,10 +341,10 @@ func (src *NnfNodeStorage) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfNodeStorage) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfNodeStorage) + src := srcRaw.(*nnfv1alpha4.NnfNodeStorage) convertlog.Info("Convert NnfNodeStorage From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfNodeStorage_To_v1alpha2_NnfNodeStorage(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfNodeStorage_To_v1alpha2_NnfNodeStorage(src, dst, nil); err != nil { return err } @@ -354,14 +354,14 @@ func (dst *NnfNodeStorage) ConvertFrom(srcRaw conversion.Hub) error { func (src *NnfPortManager) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfPortManager To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfPortManager) + dst := dstRaw.(*nnfv1alpha4.NnfPortManager) - if err := Convert_v1alpha2_NnfPortManager_To_v1alpha3_NnfPortManager(src, dst, nil); err != nil { + if err := Convert_v1alpha2_NnfPortManager_To_v1alpha4_NnfPortManager(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfPortManager{} + restored := &nnfv1alpha4.NnfPortManager{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -373,10 +373,10 @@ func (src *NnfPortManager) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfPortManager) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfPortManager) + src := srcRaw.(*nnfv1alpha4.NnfPortManager) convertlog.Info("Convert NnfPortManager From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfPortManager_To_v1alpha2_NnfPortManager(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfPortManager_To_v1alpha2_NnfPortManager(src, dst, nil); err != nil { return err } @@ -386,14 +386,14 @@ func (dst *NnfPortManager) ConvertFrom(srcRaw conversion.Hub) error { func (src *NnfStorage) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfStorage To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfStorage) + dst := dstRaw.(*nnfv1alpha4.NnfStorage) - if err := Convert_v1alpha2_NnfStorage_To_v1alpha3_NnfStorage(src, dst, nil); err != nil { + if err := Convert_v1alpha2_NnfStorage_To_v1alpha4_NnfStorage(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfStorage{} + restored := &nnfv1alpha4.NnfStorage{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -405,10 +405,10 @@ func (src *NnfStorage) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfStorage) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfStorage) + src := srcRaw.(*nnfv1alpha4.NnfStorage) convertlog.Info("Convert NnfStorage From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfStorage_To_v1alpha2_NnfStorage(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfStorage_To_v1alpha2_NnfStorage(src, dst, nil); err != nil { return err } @@ -418,14 +418,14 @@ func (dst *NnfStorage) ConvertFrom(srcRaw conversion.Hub) error { func (src *NnfStorageProfile) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfStorageProfile To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfStorageProfile) + dst := dstRaw.(*nnfv1alpha4.NnfStorageProfile) - if err := Convert_v1alpha2_NnfStorageProfile_To_v1alpha3_NnfStorageProfile(src, dst, nil); err != nil { + if err := Convert_v1alpha2_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfStorageProfile{} + restored := &nnfv1alpha4.NnfStorageProfile{} hasAnno, err := utilconversion.UnmarshalData(src, restored) if err != nil { return err @@ -455,10 +455,10 @@ func (src *NnfStorageProfile) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfStorageProfile) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfStorageProfile) + src := srcRaw.(*nnfv1alpha4.NnfStorageProfile) convertlog.Info("Convert NnfStorageProfile From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfStorageProfile_To_v1alpha2_NnfStorageProfile(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfStorageProfile_To_v1alpha2_NnfStorageProfile(src, dst, nil); err != nil { return err } @@ -468,14 +468,14 @@ func (dst *NnfStorageProfile) ConvertFrom(srcRaw conversion.Hub) error { func (src *NnfSystemStorage) ConvertTo(dstRaw conversion.Hub) error { convertlog.Info("Convert NnfSystemStorage To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha3.NnfSystemStorage) + dst := dstRaw.(*nnfv1alpha4.NnfSystemStorage) - if err := Convert_v1alpha2_NnfSystemStorage_To_v1alpha3_NnfSystemStorage(src, dst, nil); err != nil { + if err := Convert_v1alpha2_NnfSystemStorage_To_v1alpha4_NnfSystemStorage(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &nnfv1alpha3.NnfSystemStorage{} + restored := &nnfv1alpha4.NnfSystemStorage{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -487,10 +487,10 @@ func (src *NnfSystemStorage) ConvertTo(dstRaw conversion.Hub) error { } func (dst *NnfSystemStorage) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha3.NnfSystemStorage) + src := srcRaw.(*nnfv1alpha4.NnfSystemStorage) convertlog.Info("Convert NnfSystemStorage From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - if err := Convert_v1alpha3_NnfSystemStorage_To_v1alpha2_NnfSystemStorage(src, dst, nil); err != nil { + if err := Convert_v1alpha4_NnfSystemStorage_To_v1alpha2_NnfSystemStorage(src, dst, nil); err != nil { return err } @@ -622,10 +622,10 @@ func (dst *NnfSystemStorageList) ConvertFrom(srcRaw conversion.Hub) error { // The conversion-gen tool dropped these from zz_generated.conversion.go to // force us to acknowledge that we are addressing the conversion requirements. 
-func Convert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfileCmdLines(in *nnfv1alpha3.NnfStorageProfileCmdLines, out *NnfStorageProfileCmdLines, s apiconversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfileCmdLines(in, out, s) +func Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfileCmdLines(in *nnfv1alpha4.NnfStorageProfileCmdLines, out *NnfStorageProfileCmdLines, s apiconversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfileCmdLines(in, out, s) } -func Convert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines(in *nnfv1alpha3.NnfStorageProfileLustreCmdLines, out *NnfStorageProfileLustreCmdLines, s apiconversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines(in, out, s) +func Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines(in *nnfv1alpha4.NnfStorageProfileLustreCmdLines, out *NnfStorageProfileLustreCmdLines, s apiconversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines(in, out, s) } diff --git a/api/v1alpha2/conversion_test.go b/api/v1alpha2/conversion_test.go index a3ef55b9..ed0bd750 100644 --- a/api/v1alpha2/conversion_test.go +++ b/api/v1alpha2/conversion_test.go @@ -24,79 +24,79 @@ import ( . 
"github.com/onsi/ginkgo/v2" - nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" utilconversion "github.com/NearNodeFlash/nnf-sos/github/cluster-api/util/conversion" ) func TestFuzzyConversion(t *testing.T) { t.Run("for NnfAccess", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfAccess{}, + Hub: &nnfv1alpha4.NnfAccess{}, Spoke: &NnfAccess{}, })) t.Run("for NnfContainerProfile", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfContainerProfile{}, + Hub: &nnfv1alpha4.NnfContainerProfile{}, Spoke: &NnfContainerProfile{}, })) t.Run("for NnfDataMovement", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfDataMovement{}, + Hub: &nnfv1alpha4.NnfDataMovement{}, Spoke: &NnfDataMovement{}, })) t.Run("for NnfDataMovementManager", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfDataMovementManager{}, + Hub: &nnfv1alpha4.NnfDataMovementManager{}, Spoke: &NnfDataMovementManager{}, })) t.Run("for NnfDataMovementProfile", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfDataMovementProfile{}, + Hub: &nnfv1alpha4.NnfDataMovementProfile{}, Spoke: &NnfDataMovementProfile{}, })) t.Run("for NnfLustreMGT", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfLustreMGT{}, + Hub: &nnfv1alpha4.NnfLustreMGT{}, Spoke: &NnfLustreMGT{}, })) t.Run("for NnfNode", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfNode{}, + Hub: &nnfv1alpha4.NnfNode{}, Spoke: &NnfNode{}, })) t.Run("for NnfNodeBlockStorage", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfNodeBlockStorage{}, + Hub: &nnfv1alpha4.NnfNodeBlockStorage{}, Spoke: &NnfNodeBlockStorage{}, })) t.Run("for NnfNodeECData", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: 
&nnfv1alpha3.NnfNodeECData{}, + Hub: &nnfv1alpha4.NnfNodeECData{}, Spoke: &NnfNodeECData{}, })) t.Run("for NnfNodeStorage", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfNodeStorage{}, + Hub: &nnfv1alpha4.NnfNodeStorage{}, Spoke: &NnfNodeStorage{}, })) t.Run("for NnfPortManager", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfPortManager{}, + Hub: &nnfv1alpha4.NnfPortManager{}, Spoke: &NnfPortManager{}, })) t.Run("for NnfStorage", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfStorage{}, + Hub: &nnfv1alpha4.NnfStorage{}, Spoke: &NnfStorage{}, })) t.Run("for NnfStorageProfile", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfStorageProfile{}, + Hub: &nnfv1alpha4.NnfStorageProfile{}, Spoke: &NnfStorageProfile{}, })) t.Run("for NnfSystemStorage", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha3.NnfSystemStorage{}, + Hub: &nnfv1alpha4.NnfSystemStorage{}, Spoke: &NnfSystemStorage{}, })) diff --git a/api/v1alpha2/doc.go b/api/v1alpha2/doc.go index 3abb1764..d98f23f9 100644 --- a/api/v1alpha2/doc.go +++ b/api/v1alpha2/doc.go @@ -19,5 +19,5 @@ // The following tag tells conversion-gen to generate conversion routines, and // it tells conversion-gen the name of the hub version. -// +k8s:conversion-gen=github.com/NearNodeFlash/nnf-sos/api/v1alpha3 +// +k8s:conversion-gen=github.com/NearNodeFlash/nnf-sos/api/v1alpha4 package v1alpha2 From a7bd414c85ec3304b08231af170f23254b0072e8 Mon Sep 17 00:00:00 2001 From: Blake Devcich Date: Wed, 13 Nov 2024 15:45:33 -0600 Subject: [PATCH 08/23] CRDBUMPER-auto-gens Make the auto-generated files. Update the SRC_DIRS spoke list in the Makefile. make manifests & make generate & make generate-go-conversions make fmt ACTION: If any of the code in this repo was referencing non-local APIs, the references to them may have been inadvertently modified. 
Verify that any non-local APIs are being referenced by their correct versions. ACTION: Begin by running "make vet". Repair any issues that it finds. Then run "make test" and continue repairing issues until the tests pass. Signed-off-by: Blake Devcich --- Makefile | 2 +- api/v1alpha1/zz_generated.conversion.go | 2254 +-- api/v1alpha2/zz_generated.conversion.go | 2256 +-- api/v1alpha3/zz_generated.conversion.go | 3207 ++++ api/v1alpha3/zz_generated.deepcopy.go | 2 +- api/v1alpha4/zz_generated.deepcopy.go | 902 +- .../bases/nnf.cray.hpe.com_nnfaccesses.yaml | 250 + ...nnf.cray.hpe.com_nnfcontainerprofiles.yaml | 14845 ++++++++++++++++ ....cray.hpe.com_nnfdatamovementmanagers.yaml | 7378 ++++++++ ....cray.hpe.com_nnfdatamovementprofiles.yaml | 126 + .../nnf.cray.hpe.com_nnfdatamovements.yaml | 406 + .../bases/nnf.cray.hpe.com_nnflustremgts.yaml | 271 + ...nnf.cray.hpe.com_nnfnodeblockstorages.yaml | 163 + .../bases/nnf.cray.hpe.com_nnfnodeecdata.yaml | 40 + .../crd/bases/nnf.cray.hpe.com_nnfnodes.yaml | 160 + .../nnf.cray.hpe.com_nnfnodestorages.yaml | 219 + .../nnf.cray.hpe.com_nnfportmanagers.yaml | 237 + .../nnf.cray.hpe.com_nnfstorageprofiles.yaml | 674 + .../bases/nnf.cray.hpe.com_nnfstorages.yaml | 295 + .../nnf.cray.hpe.com_nnfsystemstorages.yaml | 240 + 20 files changed, 31603 insertions(+), 2324 deletions(-) create mode 100644 api/v1alpha3/zz_generated.conversion.go diff --git a/Makefile b/Makefile index e29506aa..bf3e06df 100644 --- a/Makefile +++ b/Makefile @@ -409,7 +409,7 @@ $(CONVERSION_GEN): $(LOCALBIN) # Build conversion-gen from tools folder. # The SRC_DIRS value is a space-separated list of paths to old versions. # The --input-dirs value is a single path item; specify multiple --input-dirs # parameters if you have multiple old versions. 
-SRC_DIRS=./api/v1alpha1 ./api/v1alpha2 +SRC_DIRS=./api/v1alpha1 ./api/v1alpha2 ./api/v1alpha3 generate-go-conversions: $(CONVERSION_GEN) ## Generate conversions go code $(MAKE) clean-generated-conversions SRC_DIRS="$(SRC_DIRS)" $(CONVERSION_GEN) \ diff --git a/api/v1alpha1/zz_generated.conversion.go b/api/v1alpha1/zz_generated.conversion.go index 5a6ab732..ea29aa94 100644 --- a/api/v1alpha1/zz_generated.conversion.go +++ b/api/v1alpha1/zz_generated.conversion.go @@ -28,7 +28,7 @@ import ( unsafe "unsafe" v1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - v1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + v1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" v2beta1 "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -43,840 +43,840 @@ func init() { // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes. func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*LustreStorageSpec)(nil), (*v1alpha3.LustreStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_LustreStorageSpec_To_v1alpha3_LustreStorageSpec(a.(*LustreStorageSpec), b.(*v1alpha3.LustreStorageSpec), scope) + if err := s.AddGeneratedConversionFunc((*LustreStorageSpec)(nil), (*v1alpha4.LustreStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_LustreStorageSpec_To_v1alpha4_LustreStorageSpec(a.(*LustreStorageSpec), b.(*v1alpha4.LustreStorageSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.LustreStorageSpec)(nil), (*LustreStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_LustreStorageSpec_To_v1alpha1_LustreStorageSpec(a.(*v1alpha3.LustreStorageSpec), b.(*LustreStorageSpec), scope) + if err := 
s.AddGeneratedConversionFunc((*v1alpha4.LustreStorageSpec)(nil), (*LustreStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_LustreStorageSpec_To_v1alpha1_LustreStorageSpec(a.(*v1alpha4.LustreStorageSpec), b.(*LustreStorageSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfAccess)(nil), (*v1alpha3.NnfAccess)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfAccess_To_v1alpha3_NnfAccess(a.(*NnfAccess), b.(*v1alpha3.NnfAccess), scope) + if err := s.AddGeneratedConversionFunc((*NnfAccess)(nil), (*v1alpha4.NnfAccess)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfAccess_To_v1alpha4_NnfAccess(a.(*NnfAccess), b.(*v1alpha4.NnfAccess), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfAccess)(nil), (*NnfAccess)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfAccess_To_v1alpha1_NnfAccess(a.(*v1alpha3.NnfAccess), b.(*NnfAccess), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfAccess)(nil), (*NnfAccess)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfAccess_To_v1alpha1_NnfAccess(a.(*v1alpha4.NnfAccess), b.(*NnfAccess), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfAccessList)(nil), (*v1alpha3.NnfAccessList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfAccessList_To_v1alpha3_NnfAccessList(a.(*NnfAccessList), b.(*v1alpha3.NnfAccessList), scope) + if err := s.AddGeneratedConversionFunc((*NnfAccessList)(nil), (*v1alpha4.NnfAccessList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfAccessList_To_v1alpha4_NnfAccessList(a.(*NnfAccessList), b.(*v1alpha4.NnfAccessList), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*v1alpha3.NnfAccessList)(nil), (*NnfAccessList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfAccessList_To_v1alpha1_NnfAccessList(a.(*v1alpha3.NnfAccessList), b.(*NnfAccessList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfAccessList)(nil), (*NnfAccessList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfAccessList_To_v1alpha1_NnfAccessList(a.(*v1alpha4.NnfAccessList), b.(*NnfAccessList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfAccessSpec)(nil), (*v1alpha3.NnfAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(a.(*NnfAccessSpec), b.(*v1alpha3.NnfAccessSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfAccessSpec)(nil), (*v1alpha4.NnfAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfAccessSpec_To_v1alpha4_NnfAccessSpec(a.(*NnfAccessSpec), b.(*v1alpha4.NnfAccessSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfAccessSpec)(nil), (*NnfAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfAccessSpec_To_v1alpha1_NnfAccessSpec(a.(*v1alpha3.NnfAccessSpec), b.(*NnfAccessSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfAccessSpec)(nil), (*NnfAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfAccessSpec_To_v1alpha1_NnfAccessSpec(a.(*v1alpha4.NnfAccessSpec), b.(*NnfAccessSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfAccessStatus)(nil), (*v1alpha3.NnfAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfAccessStatus_To_v1alpha3_NnfAccessStatus(a.(*NnfAccessStatus), b.(*v1alpha3.NnfAccessStatus), 
scope) + if err := s.AddGeneratedConversionFunc((*NnfAccessStatus)(nil), (*v1alpha4.NnfAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfAccessStatus_To_v1alpha4_NnfAccessStatus(a.(*NnfAccessStatus), b.(*v1alpha4.NnfAccessStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfAccessStatus)(nil), (*NnfAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfAccessStatus_To_v1alpha1_NnfAccessStatus(a.(*v1alpha3.NnfAccessStatus), b.(*NnfAccessStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfAccessStatus)(nil), (*NnfAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfAccessStatus_To_v1alpha1_NnfAccessStatus(a.(*v1alpha4.NnfAccessStatus), b.(*NnfAccessStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfContainerProfile)(nil), (*v1alpha3.NnfContainerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfContainerProfile_To_v1alpha3_NnfContainerProfile(a.(*NnfContainerProfile), b.(*v1alpha3.NnfContainerProfile), scope) + if err := s.AddGeneratedConversionFunc((*NnfContainerProfile)(nil), (*v1alpha4.NnfContainerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfContainerProfile_To_v1alpha4_NnfContainerProfile(a.(*NnfContainerProfile), b.(*v1alpha4.NnfContainerProfile), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfContainerProfile)(nil), (*NnfContainerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfContainerProfile_To_v1alpha1_NnfContainerProfile(a.(*v1alpha3.NnfContainerProfile), b.(*NnfContainerProfile), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfContainerProfile)(nil), (*NnfContainerProfile)(nil), func(a, b 
interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfContainerProfile_To_v1alpha1_NnfContainerProfile(a.(*v1alpha4.NnfContainerProfile), b.(*NnfContainerProfile), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfContainerProfileData)(nil), (*v1alpha3.NnfContainerProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfContainerProfileData_To_v1alpha3_NnfContainerProfileData(a.(*NnfContainerProfileData), b.(*v1alpha3.NnfContainerProfileData), scope) + if err := s.AddGeneratedConversionFunc((*NnfContainerProfileData)(nil), (*v1alpha4.NnfContainerProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfContainerProfileData_To_v1alpha4_NnfContainerProfileData(a.(*NnfContainerProfileData), b.(*v1alpha4.NnfContainerProfileData), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfContainerProfileData)(nil), (*NnfContainerProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfContainerProfileData_To_v1alpha1_NnfContainerProfileData(a.(*v1alpha3.NnfContainerProfileData), b.(*NnfContainerProfileData), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfContainerProfileData)(nil), (*NnfContainerProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfContainerProfileData_To_v1alpha1_NnfContainerProfileData(a.(*v1alpha4.NnfContainerProfileData), b.(*NnfContainerProfileData), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfContainerProfileList)(nil), (*v1alpha3.NnfContainerProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfContainerProfileList_To_v1alpha3_NnfContainerProfileList(a.(*NnfContainerProfileList), b.(*v1alpha3.NnfContainerProfileList), scope) + if err := 
s.AddGeneratedConversionFunc((*NnfContainerProfileList)(nil), (*v1alpha4.NnfContainerProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfContainerProfileList_To_v1alpha4_NnfContainerProfileList(a.(*NnfContainerProfileList), b.(*v1alpha4.NnfContainerProfileList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfContainerProfileList)(nil), (*NnfContainerProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfContainerProfileList_To_v1alpha1_NnfContainerProfileList(a.(*v1alpha3.NnfContainerProfileList), b.(*NnfContainerProfileList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfContainerProfileList)(nil), (*NnfContainerProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfContainerProfileList_To_v1alpha1_NnfContainerProfileList(a.(*v1alpha4.NnfContainerProfileList), b.(*NnfContainerProfileList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfContainerProfileStorage)(nil), (*v1alpha3.NnfContainerProfileStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfContainerProfileStorage_To_v1alpha3_NnfContainerProfileStorage(a.(*NnfContainerProfileStorage), b.(*v1alpha3.NnfContainerProfileStorage), scope) + if err := s.AddGeneratedConversionFunc((*NnfContainerProfileStorage)(nil), (*v1alpha4.NnfContainerProfileStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfContainerProfileStorage_To_v1alpha4_NnfContainerProfileStorage(a.(*NnfContainerProfileStorage), b.(*v1alpha4.NnfContainerProfileStorage), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfContainerProfileStorage)(nil), (*NnfContainerProfileStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha3_NnfContainerProfileStorage_To_v1alpha1_NnfContainerProfileStorage(a.(*v1alpha3.NnfContainerProfileStorage), b.(*NnfContainerProfileStorage), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfContainerProfileStorage)(nil), (*NnfContainerProfileStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfContainerProfileStorage_To_v1alpha1_NnfContainerProfileStorage(a.(*v1alpha4.NnfContainerProfileStorage), b.(*NnfContainerProfileStorage), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDataMovement)(nil), (*v1alpha3.NnfDataMovement)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDataMovement_To_v1alpha3_NnfDataMovement(a.(*NnfDataMovement), b.(*v1alpha3.NnfDataMovement), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovement)(nil), (*v1alpha4.NnfDataMovement)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfDataMovement_To_v1alpha4_NnfDataMovement(a.(*NnfDataMovement), b.(*v1alpha4.NnfDataMovement), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovement)(nil), (*NnfDataMovement)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovement_To_v1alpha1_NnfDataMovement(a.(*v1alpha3.NnfDataMovement), b.(*NnfDataMovement), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovement)(nil), (*NnfDataMovement)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovement_To_v1alpha1_NnfDataMovement(a.(*v1alpha4.NnfDataMovement), b.(*NnfDataMovement), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementCommandStatus)(nil), (*v1alpha3.NnfDataMovementCommandStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha1_NnfDataMovementCommandStatus_To_v1alpha3_NnfDataMovementCommandStatus(a.(*NnfDataMovementCommandStatus), b.(*v1alpha3.NnfDataMovementCommandStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovementCommandStatus)(nil), (*v1alpha4.NnfDataMovementCommandStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfDataMovementCommandStatus_To_v1alpha4_NnfDataMovementCommandStatus(a.(*NnfDataMovementCommandStatus), b.(*v1alpha4.NnfDataMovementCommandStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovementCommandStatus)(nil), (*NnfDataMovementCommandStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovementCommandStatus_To_v1alpha1_NnfDataMovementCommandStatus(a.(*v1alpha3.NnfDataMovementCommandStatus), b.(*NnfDataMovementCommandStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementCommandStatus)(nil), (*NnfDataMovementCommandStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementCommandStatus_To_v1alpha1_NnfDataMovementCommandStatus(a.(*v1alpha4.NnfDataMovementCommandStatus), b.(*NnfDataMovementCommandStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementConfig)(nil), (*v1alpha3.NnfDataMovementConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDataMovementConfig_To_v1alpha3_NnfDataMovementConfig(a.(*NnfDataMovementConfig), b.(*v1alpha3.NnfDataMovementConfig), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovementConfig)(nil), (*v1alpha4.NnfDataMovementConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfDataMovementConfig_To_v1alpha4_NnfDataMovementConfig(a.(*NnfDataMovementConfig), b.(*v1alpha4.NnfDataMovementConfig), scope) }); err != nil { return err } - if 
err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovementConfig)(nil), (*NnfDataMovementConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovementConfig_To_v1alpha1_NnfDataMovementConfig(a.(*v1alpha3.NnfDataMovementConfig), b.(*NnfDataMovementConfig), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementConfig)(nil), (*NnfDataMovementConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementConfig_To_v1alpha1_NnfDataMovementConfig(a.(*v1alpha4.NnfDataMovementConfig), b.(*NnfDataMovementConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementList)(nil), (*v1alpha3.NnfDataMovementList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDataMovementList_To_v1alpha3_NnfDataMovementList(a.(*NnfDataMovementList), b.(*v1alpha3.NnfDataMovementList), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovementList)(nil), (*v1alpha4.NnfDataMovementList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfDataMovementList_To_v1alpha4_NnfDataMovementList(a.(*NnfDataMovementList), b.(*v1alpha4.NnfDataMovementList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovementList)(nil), (*NnfDataMovementList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovementList_To_v1alpha1_NnfDataMovementList(a.(*v1alpha3.NnfDataMovementList), b.(*NnfDataMovementList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementList)(nil), (*NnfDataMovementList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementList_To_v1alpha1_NnfDataMovementList(a.(*v1alpha4.NnfDataMovementList), b.(*NnfDataMovementList), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*NnfDataMovementManager)(nil), (*v1alpha3.NnfDataMovementManager)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDataMovementManager_To_v1alpha3_NnfDataMovementManager(a.(*NnfDataMovementManager), b.(*v1alpha3.NnfDataMovementManager), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovementManager)(nil), (*v1alpha4.NnfDataMovementManager)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfDataMovementManager_To_v1alpha4_NnfDataMovementManager(a.(*NnfDataMovementManager), b.(*v1alpha4.NnfDataMovementManager), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovementManager)(nil), (*NnfDataMovementManager)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovementManager_To_v1alpha1_NnfDataMovementManager(a.(*v1alpha3.NnfDataMovementManager), b.(*NnfDataMovementManager), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementManager)(nil), (*NnfDataMovementManager)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementManager_To_v1alpha1_NnfDataMovementManager(a.(*v1alpha4.NnfDataMovementManager), b.(*NnfDataMovementManager), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementManagerList)(nil), (*v1alpha3.NnfDataMovementManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDataMovementManagerList_To_v1alpha3_NnfDataMovementManagerList(a.(*NnfDataMovementManagerList), b.(*v1alpha3.NnfDataMovementManagerList), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovementManagerList)(nil), (*v1alpha4.NnfDataMovementManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha1_NnfDataMovementManagerList_To_v1alpha4_NnfDataMovementManagerList(a.(*NnfDataMovementManagerList), b.(*v1alpha4.NnfDataMovementManagerList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovementManagerList)(nil), (*NnfDataMovementManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovementManagerList_To_v1alpha1_NnfDataMovementManagerList(a.(*v1alpha3.NnfDataMovementManagerList), b.(*NnfDataMovementManagerList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementManagerList)(nil), (*NnfDataMovementManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementManagerList_To_v1alpha1_NnfDataMovementManagerList(a.(*v1alpha4.NnfDataMovementManagerList), b.(*NnfDataMovementManagerList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementManagerSpec)(nil), (*v1alpha3.NnfDataMovementManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDataMovementManagerSpec_To_v1alpha3_NnfDataMovementManagerSpec(a.(*NnfDataMovementManagerSpec), b.(*v1alpha3.NnfDataMovementManagerSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovementManagerSpec)(nil), (*v1alpha4.NnfDataMovementManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfDataMovementManagerSpec_To_v1alpha4_NnfDataMovementManagerSpec(a.(*NnfDataMovementManagerSpec), b.(*v1alpha4.NnfDataMovementManagerSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovementManagerSpec)(nil), (*NnfDataMovementManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovementManagerSpec_To_v1alpha1_NnfDataMovementManagerSpec(a.(*v1alpha3.NnfDataMovementManagerSpec), 
b.(*NnfDataMovementManagerSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementManagerSpec)(nil), (*NnfDataMovementManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementManagerSpec_To_v1alpha1_NnfDataMovementManagerSpec(a.(*v1alpha4.NnfDataMovementManagerSpec), b.(*NnfDataMovementManagerSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementManagerStatus)(nil), (*v1alpha3.NnfDataMovementManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDataMovementManagerStatus_To_v1alpha3_NnfDataMovementManagerStatus(a.(*NnfDataMovementManagerStatus), b.(*v1alpha3.NnfDataMovementManagerStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovementManagerStatus)(nil), (*v1alpha4.NnfDataMovementManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfDataMovementManagerStatus_To_v1alpha4_NnfDataMovementManagerStatus(a.(*NnfDataMovementManagerStatus), b.(*v1alpha4.NnfDataMovementManagerStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovementManagerStatus)(nil), (*NnfDataMovementManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovementManagerStatus_To_v1alpha1_NnfDataMovementManagerStatus(a.(*v1alpha3.NnfDataMovementManagerStatus), b.(*NnfDataMovementManagerStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementManagerStatus)(nil), (*NnfDataMovementManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementManagerStatus_To_v1alpha1_NnfDataMovementManagerStatus(a.(*v1alpha4.NnfDataMovementManagerStatus), b.(*NnfDataMovementManagerStatus), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*NnfDataMovementProfile)(nil), (*v1alpha3.NnfDataMovementProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDataMovementProfile_To_v1alpha3_NnfDataMovementProfile(a.(*NnfDataMovementProfile), b.(*v1alpha3.NnfDataMovementProfile), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovementProfile)(nil), (*v1alpha4.NnfDataMovementProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfDataMovementProfile_To_v1alpha4_NnfDataMovementProfile(a.(*NnfDataMovementProfile), b.(*v1alpha4.NnfDataMovementProfile), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovementProfile)(nil), (*NnfDataMovementProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovementProfile_To_v1alpha1_NnfDataMovementProfile(a.(*v1alpha3.NnfDataMovementProfile), b.(*NnfDataMovementProfile), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementProfile)(nil), (*NnfDataMovementProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementProfile_To_v1alpha1_NnfDataMovementProfile(a.(*v1alpha4.NnfDataMovementProfile), b.(*NnfDataMovementProfile), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementProfileData)(nil), (*v1alpha3.NnfDataMovementProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData(a.(*NnfDataMovementProfileData), b.(*v1alpha3.NnfDataMovementProfileData), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovementProfileData)(nil), (*v1alpha4.NnfDataMovementProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha1_NnfDataMovementProfileData_To_v1alpha4_NnfDataMovementProfileData(a.(*NnfDataMovementProfileData), b.(*v1alpha4.NnfDataMovementProfileData), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovementProfileData)(nil), (*NnfDataMovementProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovementProfileData_To_v1alpha1_NnfDataMovementProfileData(a.(*v1alpha3.NnfDataMovementProfileData), b.(*NnfDataMovementProfileData), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementProfileData)(nil), (*NnfDataMovementProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha1_NnfDataMovementProfileData(a.(*v1alpha4.NnfDataMovementProfileData), b.(*NnfDataMovementProfileData), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementProfileList)(nil), (*v1alpha3.NnfDataMovementProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDataMovementProfileList_To_v1alpha3_NnfDataMovementProfileList(a.(*NnfDataMovementProfileList), b.(*v1alpha3.NnfDataMovementProfileList), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovementProfileList)(nil), (*v1alpha4.NnfDataMovementProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfDataMovementProfileList_To_v1alpha4_NnfDataMovementProfileList(a.(*NnfDataMovementProfileList), b.(*v1alpha4.NnfDataMovementProfileList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovementProfileList)(nil), (*NnfDataMovementProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovementProfileList_To_v1alpha1_NnfDataMovementProfileList(a.(*v1alpha3.NnfDataMovementProfileList), 
b.(*NnfDataMovementProfileList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementProfileList)(nil), (*NnfDataMovementProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementProfileList_To_v1alpha1_NnfDataMovementProfileList(a.(*v1alpha4.NnfDataMovementProfileList), b.(*NnfDataMovementProfileList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementSpec)(nil), (*v1alpha3.NnfDataMovementSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDataMovementSpec_To_v1alpha3_NnfDataMovementSpec(a.(*NnfDataMovementSpec), b.(*v1alpha3.NnfDataMovementSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovementSpec)(nil), (*v1alpha4.NnfDataMovementSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfDataMovementSpec_To_v1alpha4_NnfDataMovementSpec(a.(*NnfDataMovementSpec), b.(*v1alpha4.NnfDataMovementSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovementSpec)(nil), (*NnfDataMovementSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovementSpec_To_v1alpha1_NnfDataMovementSpec(a.(*v1alpha3.NnfDataMovementSpec), b.(*NnfDataMovementSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementSpec)(nil), (*NnfDataMovementSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementSpec_To_v1alpha1_NnfDataMovementSpec(a.(*v1alpha4.NnfDataMovementSpec), b.(*NnfDataMovementSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementSpecSourceDestination)(nil), (*v1alpha3.NnfDataMovementSpecSourceDestination)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha1_NnfDataMovementSpecSourceDestination_To_v1alpha3_NnfDataMovementSpecSourceDestination(a.(*NnfDataMovementSpecSourceDestination), b.(*v1alpha3.NnfDataMovementSpecSourceDestination), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovementSpecSourceDestination)(nil), (*v1alpha4.NnfDataMovementSpecSourceDestination)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfDataMovementSpecSourceDestination_To_v1alpha4_NnfDataMovementSpecSourceDestination(a.(*NnfDataMovementSpecSourceDestination), b.(*v1alpha4.NnfDataMovementSpecSourceDestination), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovementSpecSourceDestination)(nil), (*NnfDataMovementSpecSourceDestination)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovementSpecSourceDestination_To_v1alpha1_NnfDataMovementSpecSourceDestination(a.(*v1alpha3.NnfDataMovementSpecSourceDestination), b.(*NnfDataMovementSpecSourceDestination), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementSpecSourceDestination)(nil), (*NnfDataMovementSpecSourceDestination)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementSpecSourceDestination_To_v1alpha1_NnfDataMovementSpecSourceDestination(a.(*v1alpha4.NnfDataMovementSpecSourceDestination), b.(*NnfDataMovementSpecSourceDestination), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementStatus)(nil), (*v1alpha3.NnfDataMovementStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDataMovementStatus_To_v1alpha3_NnfDataMovementStatus(a.(*NnfDataMovementStatus), b.(*v1alpha3.NnfDataMovementStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovementStatus)(nil), (*v1alpha4.NnfDataMovementStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha1_NnfDataMovementStatus_To_v1alpha4_NnfDataMovementStatus(a.(*NnfDataMovementStatus), b.(*v1alpha4.NnfDataMovementStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovementStatus)(nil), (*NnfDataMovementStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovementStatus_To_v1alpha1_NnfDataMovementStatus(a.(*v1alpha3.NnfDataMovementStatus), b.(*NnfDataMovementStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementStatus)(nil), (*NnfDataMovementStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementStatus_To_v1alpha1_NnfDataMovementStatus(a.(*v1alpha4.NnfDataMovementStatus), b.(*NnfDataMovementStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDriveStatus)(nil), (*v1alpha3.NnfDriveStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDriveStatus_To_v1alpha3_NnfDriveStatus(a.(*NnfDriveStatus), b.(*v1alpha3.NnfDriveStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfDriveStatus)(nil), (*v1alpha4.NnfDriveStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfDriveStatus_To_v1alpha4_NnfDriveStatus(a.(*NnfDriveStatus), b.(*v1alpha4.NnfDriveStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDriveStatus)(nil), (*NnfDriveStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDriveStatus_To_v1alpha1_NnfDriveStatus(a.(*v1alpha3.NnfDriveStatus), b.(*NnfDriveStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDriveStatus)(nil), (*NnfDriveStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDriveStatus_To_v1alpha1_NnfDriveStatus(a.(*v1alpha4.NnfDriveStatus), b.(*NnfDriveStatus), scope) }); 
err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfLustreMGT)(nil), (*v1alpha3.NnfLustreMGT)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfLustreMGT_To_v1alpha3_NnfLustreMGT(a.(*NnfLustreMGT), b.(*v1alpha3.NnfLustreMGT), scope) + if err := s.AddGeneratedConversionFunc((*NnfLustreMGT)(nil), (*v1alpha4.NnfLustreMGT)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfLustreMGT_To_v1alpha4_NnfLustreMGT(a.(*NnfLustreMGT), b.(*v1alpha4.NnfLustreMGT), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfLustreMGT)(nil), (*NnfLustreMGT)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfLustreMGT_To_v1alpha1_NnfLustreMGT(a.(*v1alpha3.NnfLustreMGT), b.(*NnfLustreMGT), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfLustreMGT)(nil), (*NnfLustreMGT)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfLustreMGT_To_v1alpha1_NnfLustreMGT(a.(*v1alpha4.NnfLustreMGT), b.(*NnfLustreMGT), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfLustreMGTList)(nil), (*v1alpha3.NnfLustreMGTList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfLustreMGTList_To_v1alpha3_NnfLustreMGTList(a.(*NnfLustreMGTList), b.(*v1alpha3.NnfLustreMGTList), scope) + if err := s.AddGeneratedConversionFunc((*NnfLustreMGTList)(nil), (*v1alpha4.NnfLustreMGTList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfLustreMGTList_To_v1alpha4_NnfLustreMGTList(a.(*NnfLustreMGTList), b.(*v1alpha4.NnfLustreMGTList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfLustreMGTList)(nil), (*NnfLustreMGTList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha3_NnfLustreMGTList_To_v1alpha1_NnfLustreMGTList(a.(*v1alpha3.NnfLustreMGTList), b.(*NnfLustreMGTList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfLustreMGTList)(nil), (*NnfLustreMGTList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfLustreMGTList_To_v1alpha1_NnfLustreMGTList(a.(*v1alpha4.NnfLustreMGTList), b.(*NnfLustreMGTList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfLustreMGTSpec)(nil), (*v1alpha3.NnfLustreMGTSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfLustreMGTSpec_To_v1alpha3_NnfLustreMGTSpec(a.(*NnfLustreMGTSpec), b.(*v1alpha3.NnfLustreMGTSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfLustreMGTSpec)(nil), (*v1alpha4.NnfLustreMGTSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfLustreMGTSpec_To_v1alpha4_NnfLustreMGTSpec(a.(*NnfLustreMGTSpec), b.(*v1alpha4.NnfLustreMGTSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfLustreMGTSpec)(nil), (*NnfLustreMGTSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfLustreMGTSpec_To_v1alpha1_NnfLustreMGTSpec(a.(*v1alpha3.NnfLustreMGTSpec), b.(*NnfLustreMGTSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfLustreMGTSpec)(nil), (*NnfLustreMGTSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfLustreMGTSpec_To_v1alpha1_NnfLustreMGTSpec(a.(*v1alpha4.NnfLustreMGTSpec), b.(*NnfLustreMGTSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfLustreMGTStatus)(nil), (*v1alpha3.NnfLustreMGTStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfLustreMGTStatus_To_v1alpha3_NnfLustreMGTStatus(a.(*NnfLustreMGTStatus), b.(*v1alpha3.NnfLustreMGTStatus), scope) + if err := 
s.AddGeneratedConversionFunc((*NnfLustreMGTStatus)(nil), (*v1alpha4.NnfLustreMGTStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfLustreMGTStatus_To_v1alpha4_NnfLustreMGTStatus(a.(*NnfLustreMGTStatus), b.(*v1alpha4.NnfLustreMGTStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfLustreMGTStatus)(nil), (*NnfLustreMGTStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfLustreMGTStatus_To_v1alpha1_NnfLustreMGTStatus(a.(*v1alpha3.NnfLustreMGTStatus), b.(*NnfLustreMGTStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfLustreMGTStatus)(nil), (*NnfLustreMGTStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfLustreMGTStatus_To_v1alpha1_NnfLustreMGTStatus(a.(*v1alpha4.NnfLustreMGTStatus), b.(*NnfLustreMGTStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfLustreMGTStatusClaim)(nil), (*v1alpha3.NnfLustreMGTStatusClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfLustreMGTStatusClaim_To_v1alpha3_NnfLustreMGTStatusClaim(a.(*NnfLustreMGTStatusClaim), b.(*v1alpha3.NnfLustreMGTStatusClaim), scope) + if err := s.AddGeneratedConversionFunc((*NnfLustreMGTStatusClaim)(nil), (*v1alpha4.NnfLustreMGTStatusClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfLustreMGTStatusClaim_To_v1alpha4_NnfLustreMGTStatusClaim(a.(*NnfLustreMGTStatusClaim), b.(*v1alpha4.NnfLustreMGTStatusClaim), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfLustreMGTStatusClaim)(nil), (*NnfLustreMGTStatusClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfLustreMGTStatusClaim_To_v1alpha1_NnfLustreMGTStatusClaim(a.(*v1alpha3.NnfLustreMGTStatusClaim), b.(*NnfLustreMGTStatusClaim), scope) + if err := 
s.AddGeneratedConversionFunc((*v1alpha4.NnfLustreMGTStatusClaim)(nil), (*NnfLustreMGTStatusClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfLustreMGTStatusClaim_To_v1alpha1_NnfLustreMGTStatusClaim(a.(*v1alpha4.NnfLustreMGTStatusClaim), b.(*NnfLustreMGTStatusClaim), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNode)(nil), (*v1alpha3.NnfNode)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNode_To_v1alpha3_NnfNode(a.(*NnfNode), b.(*v1alpha3.NnfNode), scope) + if err := s.AddGeneratedConversionFunc((*NnfNode)(nil), (*v1alpha4.NnfNode)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfNode_To_v1alpha4_NnfNode(a.(*NnfNode), b.(*v1alpha4.NnfNode), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNode)(nil), (*NnfNode)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNode_To_v1alpha1_NnfNode(a.(*v1alpha3.NnfNode), b.(*NnfNode), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNode)(nil), (*NnfNode)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNode_To_v1alpha1_NnfNode(a.(*v1alpha4.NnfNode), b.(*NnfNode), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorage)(nil), (*v1alpha3.NnfNodeBlockStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeBlockStorage_To_v1alpha3_NnfNodeBlockStorage(a.(*NnfNodeBlockStorage), b.(*v1alpha3.NnfNodeBlockStorage), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorage)(nil), (*v1alpha4.NnfNodeBlockStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfNodeBlockStorage_To_v1alpha4_NnfNodeBlockStorage(a.(*NnfNodeBlockStorage), b.(*v1alpha4.NnfNodeBlockStorage), scope) }); err != nil { 
return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeBlockStorage)(nil), (*NnfNodeBlockStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeBlockStorage_To_v1alpha1_NnfNodeBlockStorage(a.(*v1alpha3.NnfNodeBlockStorage), b.(*NnfNodeBlockStorage), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorage)(nil), (*NnfNodeBlockStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeBlockStorage_To_v1alpha1_NnfNodeBlockStorage(a.(*v1alpha4.NnfNodeBlockStorage), b.(*NnfNodeBlockStorage), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageAccessStatus)(nil), (*v1alpha3.NnfNodeBlockStorageAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeBlockStorageAccessStatus_To_v1alpha3_NnfNodeBlockStorageAccessStatus(a.(*NnfNodeBlockStorageAccessStatus), b.(*v1alpha3.NnfNodeBlockStorageAccessStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageAccessStatus)(nil), (*v1alpha4.NnfNodeBlockStorageAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfNodeBlockStorageAccessStatus_To_v1alpha4_NnfNodeBlockStorageAccessStatus(a.(*NnfNodeBlockStorageAccessStatus), b.(*v1alpha4.NnfNodeBlockStorageAccessStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeBlockStorageAccessStatus)(nil), (*NnfNodeBlockStorageAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeBlockStorageAccessStatus_To_v1alpha1_NnfNodeBlockStorageAccessStatus(a.(*v1alpha3.NnfNodeBlockStorageAccessStatus), b.(*NnfNodeBlockStorageAccessStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageAccessStatus)(nil), (*NnfNodeBlockStorageAccessStatus)(nil), func(a, b interface{}, scope 
conversion.Scope) error { + return Convert_v1alpha4_NnfNodeBlockStorageAccessStatus_To_v1alpha1_NnfNodeBlockStorageAccessStatus(a.(*v1alpha4.NnfNodeBlockStorageAccessStatus), b.(*NnfNodeBlockStorageAccessStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageAllocationSpec)(nil), (*v1alpha3.NnfNodeBlockStorageAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeBlockStorageAllocationSpec_To_v1alpha3_NnfNodeBlockStorageAllocationSpec(a.(*NnfNodeBlockStorageAllocationSpec), b.(*v1alpha3.NnfNodeBlockStorageAllocationSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageAllocationSpec)(nil), (*v1alpha4.NnfNodeBlockStorageAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfNodeBlockStorageAllocationSpec_To_v1alpha4_NnfNodeBlockStorageAllocationSpec(a.(*NnfNodeBlockStorageAllocationSpec), b.(*v1alpha4.NnfNodeBlockStorageAllocationSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeBlockStorageAllocationSpec)(nil), (*NnfNodeBlockStorageAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeBlockStorageAllocationSpec_To_v1alpha1_NnfNodeBlockStorageAllocationSpec(a.(*v1alpha3.NnfNodeBlockStorageAllocationSpec), b.(*NnfNodeBlockStorageAllocationSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageAllocationSpec)(nil), (*NnfNodeBlockStorageAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeBlockStorageAllocationSpec_To_v1alpha1_NnfNodeBlockStorageAllocationSpec(a.(*v1alpha4.NnfNodeBlockStorageAllocationSpec), b.(*NnfNodeBlockStorageAllocationSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageAllocationStatus)(nil), 
(*v1alpha3.NnfNodeBlockStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeBlockStorageAllocationStatus_To_v1alpha3_NnfNodeBlockStorageAllocationStatus(a.(*NnfNodeBlockStorageAllocationStatus), b.(*v1alpha3.NnfNodeBlockStorageAllocationStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageAllocationStatus)(nil), (*v1alpha4.NnfNodeBlockStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfNodeBlockStorageAllocationStatus_To_v1alpha4_NnfNodeBlockStorageAllocationStatus(a.(*NnfNodeBlockStorageAllocationStatus), b.(*v1alpha4.NnfNodeBlockStorageAllocationStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeBlockStorageAllocationStatus)(nil), (*NnfNodeBlockStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeBlockStorageAllocationStatus_To_v1alpha1_NnfNodeBlockStorageAllocationStatus(a.(*v1alpha3.NnfNodeBlockStorageAllocationStatus), b.(*NnfNodeBlockStorageAllocationStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageAllocationStatus)(nil), (*NnfNodeBlockStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeBlockStorageAllocationStatus_To_v1alpha1_NnfNodeBlockStorageAllocationStatus(a.(*v1alpha4.NnfNodeBlockStorageAllocationStatus), b.(*NnfNodeBlockStorageAllocationStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageDeviceStatus)(nil), (*v1alpha3.NnfNodeBlockStorageDeviceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeBlockStorageDeviceStatus_To_v1alpha3_NnfNodeBlockStorageDeviceStatus(a.(*NnfNodeBlockStorageDeviceStatus), b.(*v1alpha3.NnfNodeBlockStorageDeviceStatus), scope) + if err := 
s.AddGeneratedConversionFunc((*NnfNodeBlockStorageDeviceStatus)(nil), (*v1alpha4.NnfNodeBlockStorageDeviceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfNodeBlockStorageDeviceStatus_To_v1alpha4_NnfNodeBlockStorageDeviceStatus(a.(*NnfNodeBlockStorageDeviceStatus), b.(*v1alpha4.NnfNodeBlockStorageDeviceStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeBlockStorageDeviceStatus)(nil), (*NnfNodeBlockStorageDeviceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeBlockStorageDeviceStatus_To_v1alpha1_NnfNodeBlockStorageDeviceStatus(a.(*v1alpha3.NnfNodeBlockStorageDeviceStatus), b.(*NnfNodeBlockStorageDeviceStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageDeviceStatus)(nil), (*NnfNodeBlockStorageDeviceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeBlockStorageDeviceStatus_To_v1alpha1_NnfNodeBlockStorageDeviceStatus(a.(*v1alpha4.NnfNodeBlockStorageDeviceStatus), b.(*NnfNodeBlockStorageDeviceStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageList)(nil), (*v1alpha3.NnfNodeBlockStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeBlockStorageList_To_v1alpha3_NnfNodeBlockStorageList(a.(*NnfNodeBlockStorageList), b.(*v1alpha3.NnfNodeBlockStorageList), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageList)(nil), (*v1alpha4.NnfNodeBlockStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfNodeBlockStorageList_To_v1alpha4_NnfNodeBlockStorageList(a.(*NnfNodeBlockStorageList), b.(*v1alpha4.NnfNodeBlockStorageList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeBlockStorageList)(nil), 
(*NnfNodeBlockStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeBlockStorageList_To_v1alpha1_NnfNodeBlockStorageList(a.(*v1alpha3.NnfNodeBlockStorageList), b.(*NnfNodeBlockStorageList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageList)(nil), (*NnfNodeBlockStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeBlockStorageList_To_v1alpha1_NnfNodeBlockStorageList(a.(*v1alpha4.NnfNodeBlockStorageList), b.(*NnfNodeBlockStorageList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageSpec)(nil), (*v1alpha3.NnfNodeBlockStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeBlockStorageSpec_To_v1alpha3_NnfNodeBlockStorageSpec(a.(*NnfNodeBlockStorageSpec), b.(*v1alpha3.NnfNodeBlockStorageSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageSpec)(nil), (*v1alpha4.NnfNodeBlockStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfNodeBlockStorageSpec_To_v1alpha4_NnfNodeBlockStorageSpec(a.(*NnfNodeBlockStorageSpec), b.(*v1alpha4.NnfNodeBlockStorageSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeBlockStorageSpec)(nil), (*NnfNodeBlockStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeBlockStorageSpec_To_v1alpha1_NnfNodeBlockStorageSpec(a.(*v1alpha3.NnfNodeBlockStorageSpec), b.(*NnfNodeBlockStorageSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageSpec)(nil), (*NnfNodeBlockStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeBlockStorageSpec_To_v1alpha1_NnfNodeBlockStorageSpec(a.(*v1alpha4.NnfNodeBlockStorageSpec), b.(*NnfNodeBlockStorageSpec), scope) }); err != nil { return 
err } - if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageStatus)(nil), (*v1alpha3.NnfNodeBlockStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeBlockStorageStatus_To_v1alpha3_NnfNodeBlockStorageStatus(a.(*NnfNodeBlockStorageStatus), b.(*v1alpha3.NnfNodeBlockStorageStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageStatus)(nil), (*v1alpha4.NnfNodeBlockStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfNodeBlockStorageStatus_To_v1alpha4_NnfNodeBlockStorageStatus(a.(*NnfNodeBlockStorageStatus), b.(*v1alpha4.NnfNodeBlockStorageStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeBlockStorageStatus)(nil), (*NnfNodeBlockStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeBlockStorageStatus_To_v1alpha1_NnfNodeBlockStorageStatus(a.(*v1alpha3.NnfNodeBlockStorageStatus), b.(*NnfNodeBlockStorageStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageStatus)(nil), (*NnfNodeBlockStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeBlockStorageStatus_To_v1alpha1_NnfNodeBlockStorageStatus(a.(*v1alpha4.NnfNodeBlockStorageStatus), b.(*NnfNodeBlockStorageStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeECData)(nil), (*v1alpha3.NnfNodeECData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeECData_To_v1alpha3_NnfNodeECData(a.(*NnfNodeECData), b.(*v1alpha3.NnfNodeECData), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeECData)(nil), (*v1alpha4.NnfNodeECData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfNodeECData_To_v1alpha4_NnfNodeECData(a.(*NnfNodeECData), b.(*v1alpha4.NnfNodeECData), scope) }); err 
!= nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeECData)(nil), (*NnfNodeECData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeECData_To_v1alpha1_NnfNodeECData(a.(*v1alpha3.NnfNodeECData), b.(*NnfNodeECData), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeECData)(nil), (*NnfNodeECData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeECData_To_v1alpha1_NnfNodeECData(a.(*v1alpha4.NnfNodeECData), b.(*NnfNodeECData), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeECDataList)(nil), (*v1alpha3.NnfNodeECDataList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeECDataList_To_v1alpha3_NnfNodeECDataList(a.(*NnfNodeECDataList), b.(*v1alpha3.NnfNodeECDataList), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeECDataList)(nil), (*v1alpha4.NnfNodeECDataList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfNodeECDataList_To_v1alpha4_NnfNodeECDataList(a.(*NnfNodeECDataList), b.(*v1alpha4.NnfNodeECDataList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeECDataList)(nil), (*NnfNodeECDataList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeECDataList_To_v1alpha1_NnfNodeECDataList(a.(*v1alpha3.NnfNodeECDataList), b.(*NnfNodeECDataList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeECDataList)(nil), (*NnfNodeECDataList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeECDataList_To_v1alpha1_NnfNodeECDataList(a.(*v1alpha4.NnfNodeECDataList), b.(*NnfNodeECDataList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeECDataSpec)(nil), (*v1alpha3.NnfNodeECDataSpec)(nil), func(a, b interface{}, scope conversion.Scope) 
error { - return Convert_v1alpha1_NnfNodeECDataSpec_To_v1alpha3_NnfNodeECDataSpec(a.(*NnfNodeECDataSpec), b.(*v1alpha3.NnfNodeECDataSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeECDataSpec)(nil), (*v1alpha4.NnfNodeECDataSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfNodeECDataSpec_To_v1alpha4_NnfNodeECDataSpec(a.(*NnfNodeECDataSpec), b.(*v1alpha4.NnfNodeECDataSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeECDataSpec)(nil), (*NnfNodeECDataSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeECDataSpec_To_v1alpha1_NnfNodeECDataSpec(a.(*v1alpha3.NnfNodeECDataSpec), b.(*NnfNodeECDataSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeECDataSpec)(nil), (*NnfNodeECDataSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeECDataSpec_To_v1alpha1_NnfNodeECDataSpec(a.(*v1alpha4.NnfNodeECDataSpec), b.(*NnfNodeECDataSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeECDataStatus)(nil), (*v1alpha3.NnfNodeECDataStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeECDataStatus_To_v1alpha3_NnfNodeECDataStatus(a.(*NnfNodeECDataStatus), b.(*v1alpha3.NnfNodeECDataStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeECDataStatus)(nil), (*v1alpha4.NnfNodeECDataStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfNodeECDataStatus_To_v1alpha4_NnfNodeECDataStatus(a.(*NnfNodeECDataStatus), b.(*v1alpha4.NnfNodeECDataStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeECDataStatus)(nil), (*NnfNodeECDataStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha3_NnfNodeECDataStatus_To_v1alpha1_NnfNodeECDataStatus(a.(*v1alpha3.NnfNodeECDataStatus), b.(*NnfNodeECDataStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeECDataStatus)(nil), (*NnfNodeECDataStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeECDataStatus_To_v1alpha1_NnfNodeECDataStatus(a.(*v1alpha4.NnfNodeECDataStatus), b.(*NnfNodeECDataStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeList)(nil), (*v1alpha3.NnfNodeList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeList_To_v1alpha3_NnfNodeList(a.(*NnfNodeList), b.(*v1alpha3.NnfNodeList), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeList)(nil), (*v1alpha4.NnfNodeList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfNodeList_To_v1alpha4_NnfNodeList(a.(*NnfNodeList), b.(*v1alpha4.NnfNodeList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeList)(nil), (*NnfNodeList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeList_To_v1alpha1_NnfNodeList(a.(*v1alpha3.NnfNodeList), b.(*NnfNodeList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeList)(nil), (*NnfNodeList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeList_To_v1alpha1_NnfNodeList(a.(*v1alpha4.NnfNodeList), b.(*NnfNodeList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeSpec)(nil), (*v1alpha3.NnfNodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeSpec_To_v1alpha3_NnfNodeSpec(a.(*NnfNodeSpec), b.(*v1alpha3.NnfNodeSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeSpec)(nil), (*v1alpha4.NnfNodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha1_NnfNodeSpec_To_v1alpha4_NnfNodeSpec(a.(*NnfNodeSpec), b.(*v1alpha4.NnfNodeSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeSpec)(nil), (*NnfNodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeSpec_To_v1alpha1_NnfNodeSpec(a.(*v1alpha3.NnfNodeSpec), b.(*NnfNodeSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeSpec)(nil), (*NnfNodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeSpec_To_v1alpha1_NnfNodeSpec(a.(*v1alpha4.NnfNodeSpec), b.(*NnfNodeSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeStatus)(nil), (*v1alpha3.NnfNodeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeStatus_To_v1alpha3_NnfNodeStatus(a.(*NnfNodeStatus), b.(*v1alpha3.NnfNodeStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeStatus)(nil), (*v1alpha4.NnfNodeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfNodeStatus_To_v1alpha4_NnfNodeStatus(a.(*NnfNodeStatus), b.(*v1alpha4.NnfNodeStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeStatus)(nil), (*NnfNodeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeStatus_To_v1alpha1_NnfNodeStatus(a.(*v1alpha3.NnfNodeStatus), b.(*NnfNodeStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeStatus)(nil), (*NnfNodeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeStatus_To_v1alpha1_NnfNodeStatus(a.(*v1alpha4.NnfNodeStatus), b.(*NnfNodeStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeStorage)(nil), (*v1alpha3.NnfNodeStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - 
return Convert_v1alpha1_NnfNodeStorage_To_v1alpha3_NnfNodeStorage(a.(*NnfNodeStorage), b.(*v1alpha3.NnfNodeStorage), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeStorage)(nil), (*v1alpha4.NnfNodeStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfNodeStorage_To_v1alpha4_NnfNodeStorage(a.(*NnfNodeStorage), b.(*v1alpha4.NnfNodeStorage), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeStorage)(nil), (*NnfNodeStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeStorage_To_v1alpha1_NnfNodeStorage(a.(*v1alpha3.NnfNodeStorage), b.(*NnfNodeStorage), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeStorage)(nil), (*NnfNodeStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeStorage_To_v1alpha1_NnfNodeStorage(a.(*v1alpha4.NnfNodeStorage), b.(*NnfNodeStorage), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeStorageAllocationStatus)(nil), (*v1alpha3.NnfNodeStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeStorageAllocationStatus_To_v1alpha3_NnfNodeStorageAllocationStatus(a.(*NnfNodeStorageAllocationStatus), b.(*v1alpha3.NnfNodeStorageAllocationStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeStorageAllocationStatus)(nil), (*v1alpha4.NnfNodeStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfNodeStorageAllocationStatus_To_v1alpha4_NnfNodeStorageAllocationStatus(a.(*NnfNodeStorageAllocationStatus), b.(*v1alpha4.NnfNodeStorageAllocationStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeStorageAllocationStatus)(nil), (*NnfNodeStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha3_NnfNodeStorageAllocationStatus_To_v1alpha1_NnfNodeStorageAllocationStatus(a.(*v1alpha3.NnfNodeStorageAllocationStatus), b.(*NnfNodeStorageAllocationStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeStorageAllocationStatus)(nil), (*NnfNodeStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeStorageAllocationStatus_To_v1alpha1_NnfNodeStorageAllocationStatus(a.(*v1alpha4.NnfNodeStorageAllocationStatus), b.(*NnfNodeStorageAllocationStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeStorageList)(nil), (*v1alpha3.NnfNodeStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeStorageList_To_v1alpha3_NnfNodeStorageList(a.(*NnfNodeStorageList), b.(*v1alpha3.NnfNodeStorageList), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeStorageList)(nil), (*v1alpha4.NnfNodeStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfNodeStorageList_To_v1alpha4_NnfNodeStorageList(a.(*NnfNodeStorageList), b.(*v1alpha4.NnfNodeStorageList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeStorageList)(nil), (*NnfNodeStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeStorageList_To_v1alpha1_NnfNodeStorageList(a.(*v1alpha3.NnfNodeStorageList), b.(*NnfNodeStorageList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeStorageList)(nil), (*NnfNodeStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeStorageList_To_v1alpha1_NnfNodeStorageList(a.(*v1alpha4.NnfNodeStorageList), b.(*NnfNodeStorageList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeStorageSpec)(nil), (*v1alpha3.NnfNodeStorageSpec)(nil), func(a, b interface{}, scope 
conversion.Scope) error { - return Convert_v1alpha1_NnfNodeStorageSpec_To_v1alpha3_NnfNodeStorageSpec(a.(*NnfNodeStorageSpec), b.(*v1alpha3.NnfNodeStorageSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeStorageSpec)(nil), (*v1alpha4.NnfNodeStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfNodeStorageSpec_To_v1alpha4_NnfNodeStorageSpec(a.(*NnfNodeStorageSpec), b.(*v1alpha4.NnfNodeStorageSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeStorageSpec)(nil), (*NnfNodeStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeStorageSpec_To_v1alpha1_NnfNodeStorageSpec(a.(*v1alpha3.NnfNodeStorageSpec), b.(*NnfNodeStorageSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeStorageSpec)(nil), (*NnfNodeStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeStorageSpec_To_v1alpha1_NnfNodeStorageSpec(a.(*v1alpha4.NnfNodeStorageSpec), b.(*NnfNodeStorageSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeStorageStatus)(nil), (*v1alpha3.NnfNodeStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeStorageStatus_To_v1alpha3_NnfNodeStorageStatus(a.(*NnfNodeStorageStatus), b.(*v1alpha3.NnfNodeStorageStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeStorageStatus)(nil), (*v1alpha4.NnfNodeStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfNodeStorageStatus_To_v1alpha4_NnfNodeStorageStatus(a.(*NnfNodeStorageStatus), b.(*v1alpha4.NnfNodeStorageStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeStorageStatus)(nil), (*NnfNodeStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha3_NnfNodeStorageStatus_To_v1alpha1_NnfNodeStorageStatus(a.(*v1alpha3.NnfNodeStorageStatus), b.(*NnfNodeStorageStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeStorageStatus)(nil), (*NnfNodeStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeStorageStatus_To_v1alpha1_NnfNodeStorageStatus(a.(*v1alpha4.NnfNodeStorageStatus), b.(*NnfNodeStorageStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfPortManager)(nil), (*v1alpha3.NnfPortManager)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfPortManager_To_v1alpha3_NnfPortManager(a.(*NnfPortManager), b.(*v1alpha3.NnfPortManager), scope) + if err := s.AddGeneratedConversionFunc((*NnfPortManager)(nil), (*v1alpha4.NnfPortManager)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfPortManager_To_v1alpha4_NnfPortManager(a.(*NnfPortManager), b.(*v1alpha4.NnfPortManager), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfPortManager)(nil), (*NnfPortManager)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfPortManager_To_v1alpha1_NnfPortManager(a.(*v1alpha3.NnfPortManager), b.(*NnfPortManager), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfPortManager)(nil), (*NnfPortManager)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfPortManager_To_v1alpha1_NnfPortManager(a.(*v1alpha4.NnfPortManager), b.(*NnfPortManager), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfPortManagerAllocationSpec)(nil), (*v1alpha3.NnfPortManagerAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfPortManagerAllocationSpec_To_v1alpha3_NnfPortManagerAllocationSpec(a.(*NnfPortManagerAllocationSpec), 
b.(*v1alpha3.NnfPortManagerAllocationSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfPortManagerAllocationSpec)(nil), (*v1alpha4.NnfPortManagerAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfPortManagerAllocationSpec_To_v1alpha4_NnfPortManagerAllocationSpec(a.(*NnfPortManagerAllocationSpec), b.(*v1alpha4.NnfPortManagerAllocationSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfPortManagerAllocationSpec)(nil), (*NnfPortManagerAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfPortManagerAllocationSpec_To_v1alpha1_NnfPortManagerAllocationSpec(a.(*v1alpha3.NnfPortManagerAllocationSpec), b.(*NnfPortManagerAllocationSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfPortManagerAllocationSpec)(nil), (*NnfPortManagerAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfPortManagerAllocationSpec_To_v1alpha1_NnfPortManagerAllocationSpec(a.(*v1alpha4.NnfPortManagerAllocationSpec), b.(*NnfPortManagerAllocationSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfPortManagerAllocationStatus)(nil), (*v1alpha3.NnfPortManagerAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfPortManagerAllocationStatus_To_v1alpha3_NnfPortManagerAllocationStatus(a.(*NnfPortManagerAllocationStatus), b.(*v1alpha3.NnfPortManagerAllocationStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfPortManagerAllocationStatus)(nil), (*v1alpha4.NnfPortManagerAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfPortManagerAllocationStatus_To_v1alpha4_NnfPortManagerAllocationStatus(a.(*NnfPortManagerAllocationStatus), b.(*v1alpha4.NnfPortManagerAllocationStatus), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*v1alpha3.NnfPortManagerAllocationStatus)(nil), (*NnfPortManagerAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfPortManagerAllocationStatus_To_v1alpha1_NnfPortManagerAllocationStatus(a.(*v1alpha3.NnfPortManagerAllocationStatus), b.(*NnfPortManagerAllocationStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfPortManagerAllocationStatus)(nil), (*NnfPortManagerAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfPortManagerAllocationStatus_To_v1alpha1_NnfPortManagerAllocationStatus(a.(*v1alpha4.NnfPortManagerAllocationStatus), b.(*NnfPortManagerAllocationStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfPortManagerList)(nil), (*v1alpha3.NnfPortManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfPortManagerList_To_v1alpha3_NnfPortManagerList(a.(*NnfPortManagerList), b.(*v1alpha3.NnfPortManagerList), scope) + if err := s.AddGeneratedConversionFunc((*NnfPortManagerList)(nil), (*v1alpha4.NnfPortManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfPortManagerList_To_v1alpha4_NnfPortManagerList(a.(*NnfPortManagerList), b.(*v1alpha4.NnfPortManagerList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfPortManagerList)(nil), (*NnfPortManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfPortManagerList_To_v1alpha1_NnfPortManagerList(a.(*v1alpha3.NnfPortManagerList), b.(*NnfPortManagerList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfPortManagerList)(nil), (*NnfPortManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfPortManagerList_To_v1alpha1_NnfPortManagerList(a.(*v1alpha4.NnfPortManagerList), 
b.(*NnfPortManagerList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfPortManagerSpec)(nil), (*v1alpha3.NnfPortManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfPortManagerSpec_To_v1alpha3_NnfPortManagerSpec(a.(*NnfPortManagerSpec), b.(*v1alpha3.NnfPortManagerSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfPortManagerSpec)(nil), (*v1alpha4.NnfPortManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfPortManagerSpec_To_v1alpha4_NnfPortManagerSpec(a.(*NnfPortManagerSpec), b.(*v1alpha4.NnfPortManagerSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfPortManagerSpec)(nil), (*NnfPortManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfPortManagerSpec_To_v1alpha1_NnfPortManagerSpec(a.(*v1alpha3.NnfPortManagerSpec), b.(*NnfPortManagerSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfPortManagerSpec)(nil), (*NnfPortManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfPortManagerSpec_To_v1alpha1_NnfPortManagerSpec(a.(*v1alpha4.NnfPortManagerSpec), b.(*NnfPortManagerSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfPortManagerStatus)(nil), (*v1alpha3.NnfPortManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfPortManagerStatus_To_v1alpha3_NnfPortManagerStatus(a.(*NnfPortManagerStatus), b.(*v1alpha3.NnfPortManagerStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfPortManagerStatus)(nil), (*v1alpha4.NnfPortManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfPortManagerStatus_To_v1alpha4_NnfPortManagerStatus(a.(*NnfPortManagerStatus), b.(*v1alpha4.NnfPortManagerStatus), scope) }); err != nil { return err } - if 
err := s.AddGeneratedConversionFunc((*v1alpha3.NnfPortManagerStatus)(nil), (*NnfPortManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfPortManagerStatus_To_v1alpha1_NnfPortManagerStatus(a.(*v1alpha3.NnfPortManagerStatus), b.(*NnfPortManagerStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfPortManagerStatus)(nil), (*NnfPortManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfPortManagerStatus_To_v1alpha1_NnfPortManagerStatus(a.(*v1alpha4.NnfPortManagerStatus), b.(*NnfPortManagerStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfResourceStatus)(nil), (*v1alpha3.NnfResourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfResourceStatus_To_v1alpha3_NnfResourceStatus(a.(*NnfResourceStatus), b.(*v1alpha3.NnfResourceStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfResourceStatus)(nil), (*v1alpha4.NnfResourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfResourceStatus_To_v1alpha4_NnfResourceStatus(a.(*NnfResourceStatus), b.(*v1alpha4.NnfResourceStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfResourceStatus)(nil), (*NnfResourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfResourceStatus_To_v1alpha1_NnfResourceStatus(a.(*v1alpha3.NnfResourceStatus), b.(*NnfResourceStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfResourceStatus)(nil), (*NnfResourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfResourceStatus_To_v1alpha1_NnfResourceStatus(a.(*v1alpha4.NnfResourceStatus), b.(*NnfResourceStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfServerStatus)(nil), 
(*v1alpha3.NnfServerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfServerStatus_To_v1alpha3_NnfServerStatus(a.(*NnfServerStatus), b.(*v1alpha3.NnfServerStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfServerStatus)(nil), (*v1alpha4.NnfServerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfServerStatus_To_v1alpha4_NnfServerStatus(a.(*NnfServerStatus), b.(*v1alpha4.NnfServerStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfServerStatus)(nil), (*NnfServerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfServerStatus_To_v1alpha1_NnfServerStatus(a.(*v1alpha3.NnfServerStatus), b.(*NnfServerStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfServerStatus)(nil), (*NnfServerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfServerStatus_To_v1alpha1_NnfServerStatus(a.(*v1alpha4.NnfServerStatus), b.(*NnfServerStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorage)(nil), (*v1alpha3.NnfStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorage_To_v1alpha3_NnfStorage(a.(*NnfStorage), b.(*v1alpha3.NnfStorage), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorage)(nil), (*v1alpha4.NnfStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfStorage_To_v1alpha4_NnfStorage(a.(*NnfStorage), b.(*v1alpha4.NnfStorage), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorage)(nil), (*NnfStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorage_To_v1alpha1_NnfStorage(a.(*v1alpha3.NnfStorage), b.(*NnfStorage), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorage)(nil), 
(*NnfStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorage_To_v1alpha1_NnfStorage(a.(*v1alpha4.NnfStorage), b.(*NnfStorage), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageAllocationNodes)(nil), (*v1alpha3.NnfStorageAllocationNodes)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageAllocationNodes_To_v1alpha3_NnfStorageAllocationNodes(a.(*NnfStorageAllocationNodes), b.(*v1alpha3.NnfStorageAllocationNodes), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageAllocationNodes)(nil), (*v1alpha4.NnfStorageAllocationNodes)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfStorageAllocationNodes_To_v1alpha4_NnfStorageAllocationNodes(a.(*NnfStorageAllocationNodes), b.(*v1alpha4.NnfStorageAllocationNodes), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageAllocationNodes)(nil), (*NnfStorageAllocationNodes)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageAllocationNodes_To_v1alpha1_NnfStorageAllocationNodes(a.(*v1alpha3.NnfStorageAllocationNodes), b.(*NnfStorageAllocationNodes), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageAllocationNodes)(nil), (*NnfStorageAllocationNodes)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageAllocationNodes_To_v1alpha1_NnfStorageAllocationNodes(a.(*v1alpha4.NnfStorageAllocationNodes), b.(*NnfStorageAllocationNodes), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageAllocationSetSpec)(nil), (*v1alpha3.NnfStorageAllocationSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageAllocationSetSpec_To_v1alpha3_NnfStorageAllocationSetSpec(a.(*NnfStorageAllocationSetSpec), 
b.(*v1alpha3.NnfStorageAllocationSetSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageAllocationSetSpec)(nil), (*v1alpha4.NnfStorageAllocationSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfStorageAllocationSetSpec_To_v1alpha4_NnfStorageAllocationSetSpec(a.(*NnfStorageAllocationSetSpec), b.(*v1alpha4.NnfStorageAllocationSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageAllocationSetSpec)(nil), (*NnfStorageAllocationSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageAllocationSetSpec_To_v1alpha1_NnfStorageAllocationSetSpec(a.(*v1alpha3.NnfStorageAllocationSetSpec), b.(*NnfStorageAllocationSetSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageAllocationSetSpec)(nil), (*NnfStorageAllocationSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageAllocationSetSpec_To_v1alpha1_NnfStorageAllocationSetSpec(a.(*v1alpha4.NnfStorageAllocationSetSpec), b.(*NnfStorageAllocationSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageAllocationSetStatus)(nil), (*v1alpha3.NnfStorageAllocationSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageAllocationSetStatus_To_v1alpha3_NnfStorageAllocationSetStatus(a.(*NnfStorageAllocationSetStatus), b.(*v1alpha3.NnfStorageAllocationSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageAllocationSetStatus)(nil), (*v1alpha4.NnfStorageAllocationSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfStorageAllocationSetStatus_To_v1alpha4_NnfStorageAllocationSetStatus(a.(*NnfStorageAllocationSetStatus), b.(*v1alpha4.NnfStorageAllocationSetStatus), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageAllocationSetStatus)(nil), (*NnfStorageAllocationSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageAllocationSetStatus_To_v1alpha1_NnfStorageAllocationSetStatus(a.(*v1alpha3.NnfStorageAllocationSetStatus), b.(*NnfStorageAllocationSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageAllocationSetStatus)(nil), (*NnfStorageAllocationSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageAllocationSetStatus_To_v1alpha1_NnfStorageAllocationSetStatus(a.(*v1alpha4.NnfStorageAllocationSetStatus), b.(*NnfStorageAllocationSetStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageList)(nil), (*v1alpha3.NnfStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageList_To_v1alpha3_NnfStorageList(a.(*NnfStorageList), b.(*v1alpha3.NnfStorageList), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageList)(nil), (*v1alpha4.NnfStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfStorageList_To_v1alpha4_NnfStorageList(a.(*NnfStorageList), b.(*v1alpha4.NnfStorageList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageList)(nil), (*NnfStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageList_To_v1alpha1_NnfStorageList(a.(*v1alpha3.NnfStorageList), b.(*NnfStorageList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageList)(nil), (*NnfStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageList_To_v1alpha1_NnfStorageList(a.(*v1alpha4.NnfStorageList), b.(*NnfStorageList), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*NnfStorageLustreSpec)(nil), (*v1alpha3.NnfStorageLustreSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageLustreSpec_To_v1alpha3_NnfStorageLustreSpec(a.(*NnfStorageLustreSpec), b.(*v1alpha3.NnfStorageLustreSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageLustreSpec)(nil), (*v1alpha4.NnfStorageLustreSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfStorageLustreSpec_To_v1alpha4_NnfStorageLustreSpec(a.(*NnfStorageLustreSpec), b.(*v1alpha4.NnfStorageLustreSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageLustreSpec)(nil), (*NnfStorageLustreSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageLustreSpec_To_v1alpha1_NnfStorageLustreSpec(a.(*v1alpha3.NnfStorageLustreSpec), b.(*NnfStorageLustreSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageLustreSpec)(nil), (*NnfStorageLustreSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageLustreSpec_To_v1alpha1_NnfStorageLustreSpec(a.(*v1alpha4.NnfStorageLustreSpec), b.(*NnfStorageLustreSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageLustreStatus)(nil), (*v1alpha3.NnfStorageLustreStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreStatus(a.(*NnfStorageLustreStatus), b.(*v1alpha3.NnfStorageLustreStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageLustreStatus)(nil), (*v1alpha4.NnfStorageLustreStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfStorageLustreStatus_To_v1alpha4_NnfStorageLustreStatus(a.(*NnfStorageLustreStatus), b.(*v1alpha4.NnfStorageLustreStatus), scope) }); err != nil { return err } - if 
err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageLustreStatus)(nil), (*NnfStorageLustreStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageLustreStatus_To_v1alpha1_NnfStorageLustreStatus(a.(*v1alpha3.NnfStorageLustreStatus), b.(*NnfStorageLustreStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageLustreStatus)(nil), (*NnfStorageLustreStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha1_NnfStorageLustreStatus(a.(*v1alpha4.NnfStorageLustreStatus), b.(*NnfStorageLustreStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfile)(nil), (*v1alpha3.NnfStorageProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageProfile_To_v1alpha3_NnfStorageProfile(a.(*NnfStorageProfile), b.(*v1alpha3.NnfStorageProfile), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageProfile)(nil), (*v1alpha4.NnfStorageProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(a.(*NnfStorageProfile), b.(*v1alpha4.NnfStorageProfile), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageProfile)(nil), (*NnfStorageProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageProfile_To_v1alpha1_NnfStorageProfile(a.(*v1alpha3.NnfStorageProfile), b.(*NnfStorageProfile), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfile)(nil), (*NnfStorageProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfile_To_v1alpha1_NnfStorageProfile(a.(*v1alpha4.NnfStorageProfile), b.(*NnfStorageProfile), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*NnfStorageProfileCmdLines)(nil), (*v1alpha3.NnfStorageProfileCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(a.(*NnfStorageProfileCmdLines), b.(*v1alpha3.NnfStorageProfileCmdLines), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileCmdLines)(nil), (*v1alpha4.NnfStorageProfileCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(a.(*NnfStorageProfileCmdLines), b.(*v1alpha4.NnfStorageProfileCmdLines), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileData)(nil), (*v1alpha3.NnfStorageProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageProfileData_To_v1alpha3_NnfStorageProfileData(a.(*NnfStorageProfileData), b.(*v1alpha3.NnfStorageProfileData), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileData)(nil), (*v1alpha4.NnfStorageProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData(a.(*NnfStorageProfileData), b.(*v1alpha4.NnfStorageProfileData), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageProfileData)(nil), (*NnfStorageProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageProfileData_To_v1alpha1_NnfStorageProfileData(a.(*v1alpha3.NnfStorageProfileData), b.(*NnfStorageProfileData), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileData)(nil), (*NnfStorageProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileData_To_v1alpha1_NnfStorageProfileData(a.(*v1alpha4.NnfStorageProfileData), 
b.(*NnfStorageProfileData), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileGFS2Data)(nil), (*v1alpha3.NnfStorageProfileGFS2Data)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageProfileGFS2Data_To_v1alpha3_NnfStorageProfileGFS2Data(a.(*NnfStorageProfileGFS2Data), b.(*v1alpha3.NnfStorageProfileGFS2Data), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileGFS2Data)(nil), (*v1alpha4.NnfStorageProfileGFS2Data)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfStorageProfileGFS2Data_To_v1alpha4_NnfStorageProfileGFS2Data(a.(*NnfStorageProfileGFS2Data), b.(*v1alpha4.NnfStorageProfileGFS2Data), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageProfileGFS2Data)(nil), (*NnfStorageProfileGFS2Data)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageProfileGFS2Data_To_v1alpha1_NnfStorageProfileGFS2Data(a.(*v1alpha3.NnfStorageProfileGFS2Data), b.(*NnfStorageProfileGFS2Data), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileGFS2Data)(nil), (*NnfStorageProfileGFS2Data)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileGFS2Data_To_v1alpha1_NnfStorageProfileGFS2Data(a.(*v1alpha4.NnfStorageProfileGFS2Data), b.(*NnfStorageProfileGFS2Data), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLVMLvChangeCmdLines)(nil), (*v1alpha3.NnfStorageProfileLVMLvChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines(a.(*NnfStorageProfileLVMLvChangeCmdLines), b.(*v1alpha3.NnfStorageProfileLVMLvChangeCmdLines), scope) + if err := 
s.AddGeneratedConversionFunc((*NnfStorageProfileLVMLvChangeCmdLines)(nil), (*v1alpha4.NnfStorageProfileLVMLvChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines(a.(*NnfStorageProfileLVMLvChangeCmdLines), b.(*v1alpha4.NnfStorageProfileLVMLvChangeCmdLines), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageProfileLVMLvChangeCmdLines)(nil), (*NnfStorageProfileLVMLvChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines(a.(*v1alpha3.NnfStorageProfileLVMLvChangeCmdLines), b.(*NnfStorageProfileLVMLvChangeCmdLines), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileLVMLvChangeCmdLines)(nil), (*NnfStorageProfileLVMLvChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines(a.(*v1alpha4.NnfStorageProfileLVMLvChangeCmdLines), b.(*NnfStorageProfileLVMLvChangeCmdLines), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLVMVgChangeCmdLines)(nil), (*v1alpha3.NnfStorageProfileLVMVgChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines(a.(*NnfStorageProfileLVMVgChangeCmdLines), b.(*v1alpha3.NnfStorageProfileLVMVgChangeCmdLines), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLVMVgChangeCmdLines)(nil), (*v1alpha4.NnfStorageProfileLVMVgChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines(a.(*NnfStorageProfileLVMVgChangeCmdLines), b.(*v1alpha4.NnfStorageProfileLVMVgChangeCmdLines), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageProfileLVMVgChangeCmdLines)(nil), (*NnfStorageProfileLVMVgChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines(a.(*v1alpha3.NnfStorageProfileLVMVgChangeCmdLines), b.(*NnfStorageProfileLVMVgChangeCmdLines), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileLVMVgChangeCmdLines)(nil), (*NnfStorageProfileLVMVgChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines(a.(*v1alpha4.NnfStorageProfileLVMVgChangeCmdLines), b.(*NnfStorageProfileLVMVgChangeCmdLines), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileList)(nil), (*v1alpha3.NnfStorageProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageProfileList_To_v1alpha3_NnfStorageProfileList(a.(*NnfStorageProfileList), b.(*v1alpha3.NnfStorageProfileList), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileList)(nil), (*v1alpha4.NnfStorageProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfStorageProfileList_To_v1alpha4_NnfStorageProfileList(a.(*NnfStorageProfileList), b.(*v1alpha4.NnfStorageProfileList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageProfileList)(nil), (*NnfStorageProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha3_NnfStorageProfileList_To_v1alpha1_NnfStorageProfileList(a.(*v1alpha3.NnfStorageProfileList), b.(*NnfStorageProfileList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileList)(nil), (*NnfStorageProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileList_To_v1alpha1_NnfStorageProfileList(a.(*v1alpha4.NnfStorageProfileList), b.(*NnfStorageProfileList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLustreCmdLines)(nil), (*v1alpha3.NnfStorageProfileLustreCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(a.(*NnfStorageProfileLustreCmdLines), b.(*v1alpha3.NnfStorageProfileLustreCmdLines), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLustreCmdLines)(nil), (*v1alpha4.NnfStorageProfileLustreCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(a.(*NnfStorageProfileLustreCmdLines), b.(*v1alpha4.NnfStorageProfileLustreCmdLines), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLustreData)(nil), (*v1alpha3.NnfStorageProfileLustreData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageProfileLustreData_To_v1alpha3_NnfStorageProfileLustreData(a.(*NnfStorageProfileLustreData), b.(*v1alpha3.NnfStorageProfileLustreData), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLustreData)(nil), (*v1alpha4.NnfStorageProfileLustreData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfStorageProfileLustreData_To_v1alpha4_NnfStorageProfileLustreData(a.(*NnfStorageProfileLustreData), b.(*v1alpha4.NnfStorageProfileLustreData), 
scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageProfileLustreData)(nil), (*NnfStorageProfileLustreData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageProfileLustreData_To_v1alpha1_NnfStorageProfileLustreData(a.(*v1alpha3.NnfStorageProfileLustreData), b.(*NnfStorageProfileLustreData), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileLustreData)(nil), (*NnfStorageProfileLustreData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileLustreData_To_v1alpha1_NnfStorageProfileLustreData(a.(*v1alpha4.NnfStorageProfileLustreData), b.(*NnfStorageProfileLustreData), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLustreMiscOptions)(nil), (*v1alpha3.NnfStorageProfileLustreMiscOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions(a.(*NnfStorageProfileLustreMiscOptions), b.(*v1alpha3.NnfStorageProfileLustreMiscOptions), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLustreMiscOptions)(nil), (*v1alpha4.NnfStorageProfileLustreMiscOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(a.(*NnfStorageProfileLustreMiscOptions), b.(*v1alpha4.NnfStorageProfileLustreMiscOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageProfileLustreMiscOptions)(nil), (*NnfStorageProfileLustreMiscOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(a.(*v1alpha3.NnfStorageProfileLustreMiscOptions), 
b.(*NnfStorageProfileLustreMiscOptions), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileLustreMiscOptions)(nil), (*NnfStorageProfileLustreMiscOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(a.(*v1alpha4.NnfStorageProfileLustreMiscOptions), b.(*NnfStorageProfileLustreMiscOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileRawData)(nil), (*v1alpha3.NnfStorageProfileRawData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageProfileRawData_To_v1alpha3_NnfStorageProfileRawData(a.(*NnfStorageProfileRawData), b.(*v1alpha3.NnfStorageProfileRawData), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileRawData)(nil), (*v1alpha4.NnfStorageProfileRawData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfStorageProfileRawData_To_v1alpha4_NnfStorageProfileRawData(a.(*NnfStorageProfileRawData), b.(*v1alpha4.NnfStorageProfileRawData), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageProfileRawData)(nil), (*NnfStorageProfileRawData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageProfileRawData_To_v1alpha1_NnfStorageProfileRawData(a.(*v1alpha3.NnfStorageProfileRawData), b.(*NnfStorageProfileRawData), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileRawData)(nil), (*NnfStorageProfileRawData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileRawData_To_v1alpha1_NnfStorageProfileRawData(a.(*v1alpha4.NnfStorageProfileRawData), b.(*NnfStorageProfileRawData), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileXFSData)(nil), 
(*v1alpha3.NnfStorageProfileXFSData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageProfileXFSData_To_v1alpha3_NnfStorageProfileXFSData(a.(*NnfStorageProfileXFSData), b.(*v1alpha3.NnfStorageProfileXFSData), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileXFSData)(nil), (*v1alpha4.NnfStorageProfileXFSData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfStorageProfileXFSData_To_v1alpha4_NnfStorageProfileXFSData(a.(*NnfStorageProfileXFSData), b.(*v1alpha4.NnfStorageProfileXFSData), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageProfileXFSData)(nil), (*NnfStorageProfileXFSData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageProfileXFSData_To_v1alpha1_NnfStorageProfileXFSData(a.(*v1alpha3.NnfStorageProfileXFSData), b.(*NnfStorageProfileXFSData), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileXFSData)(nil), (*NnfStorageProfileXFSData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileXFSData_To_v1alpha1_NnfStorageProfileXFSData(a.(*v1alpha4.NnfStorageProfileXFSData), b.(*NnfStorageProfileXFSData), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageSpec)(nil), (*v1alpha3.NnfStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageSpec_To_v1alpha3_NnfStorageSpec(a.(*NnfStorageSpec), b.(*v1alpha3.NnfStorageSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageSpec)(nil), (*v1alpha4.NnfStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfStorageSpec_To_v1alpha4_NnfStorageSpec(a.(*NnfStorageSpec), b.(*v1alpha4.NnfStorageSpec), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageSpec)(nil), (*NnfStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageSpec_To_v1alpha1_NnfStorageSpec(a.(*v1alpha3.NnfStorageSpec), b.(*NnfStorageSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageSpec)(nil), (*NnfStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageSpec_To_v1alpha1_NnfStorageSpec(a.(*v1alpha4.NnfStorageSpec), b.(*NnfStorageSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageStatus)(nil), (*v1alpha3.NnfStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageStatus_To_v1alpha3_NnfStorageStatus(a.(*NnfStorageStatus), b.(*v1alpha3.NnfStorageStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageStatus)(nil), (*v1alpha4.NnfStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfStorageStatus_To_v1alpha4_NnfStorageStatus(a.(*NnfStorageStatus), b.(*v1alpha4.NnfStorageStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageStatus)(nil), (*NnfStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageStatus_To_v1alpha1_NnfStorageStatus(a.(*v1alpha3.NnfStorageStatus), b.(*NnfStorageStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageStatus)(nil), (*NnfStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageStatus_To_v1alpha1_NnfStorageStatus(a.(*v1alpha4.NnfStorageStatus), b.(*NnfStorageStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfSystemStorage)(nil), (*v1alpha3.NnfSystemStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha1_NnfSystemStorage_To_v1alpha3_NnfSystemStorage(a.(*NnfSystemStorage), b.(*v1alpha3.NnfSystemStorage), scope) + if err := s.AddGeneratedConversionFunc((*NnfSystemStorage)(nil), (*v1alpha4.NnfSystemStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfSystemStorage_To_v1alpha4_NnfSystemStorage(a.(*NnfSystemStorage), b.(*v1alpha4.NnfSystemStorage), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfSystemStorage)(nil), (*NnfSystemStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfSystemStorage_To_v1alpha1_NnfSystemStorage(a.(*v1alpha3.NnfSystemStorage), b.(*NnfSystemStorage), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfSystemStorage)(nil), (*NnfSystemStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfSystemStorage_To_v1alpha1_NnfSystemStorage(a.(*v1alpha4.NnfSystemStorage), b.(*NnfSystemStorage), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfSystemStorageList)(nil), (*v1alpha3.NnfSystemStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfSystemStorageList_To_v1alpha3_NnfSystemStorageList(a.(*NnfSystemStorageList), b.(*v1alpha3.NnfSystemStorageList), scope) + if err := s.AddGeneratedConversionFunc((*NnfSystemStorageList)(nil), (*v1alpha4.NnfSystemStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfSystemStorageList_To_v1alpha4_NnfSystemStorageList(a.(*NnfSystemStorageList), b.(*v1alpha4.NnfSystemStorageList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfSystemStorageList)(nil), (*NnfSystemStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfSystemStorageList_To_v1alpha1_NnfSystemStorageList(a.(*v1alpha3.NnfSystemStorageList), 
b.(*NnfSystemStorageList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfSystemStorageList)(nil), (*NnfSystemStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfSystemStorageList_To_v1alpha1_NnfSystemStorageList(a.(*v1alpha4.NnfSystemStorageList), b.(*NnfSystemStorageList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfSystemStorageSpec)(nil), (*v1alpha3.NnfSystemStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec(a.(*NnfSystemStorageSpec), b.(*v1alpha3.NnfSystemStorageSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfSystemStorageSpec)(nil), (*v1alpha4.NnfSystemStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfSystemStorageSpec_To_v1alpha4_NnfSystemStorageSpec(a.(*NnfSystemStorageSpec), b.(*v1alpha4.NnfSystemStorageSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfSystemStorageStatus)(nil), (*v1alpha3.NnfSystemStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfSystemStorageStatus_To_v1alpha3_NnfSystemStorageStatus(a.(*NnfSystemStorageStatus), b.(*v1alpha3.NnfSystemStorageStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfSystemStorageStatus)(nil), (*v1alpha4.NnfSystemStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus(a.(*NnfSystemStorageStatus), b.(*v1alpha4.NnfSystemStorageStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfSystemStorageStatus)(nil), (*NnfSystemStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha3_NnfSystemStorageStatus_To_v1alpha1_NnfSystemStorageStatus(a.(*v1alpha3.NnfSystemStorageStatus), b.(*NnfSystemStorageStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfSystemStorageStatus)(nil), (*NnfSystemStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfSystemStorageStatus_To_v1alpha1_NnfSystemStorageStatus(a.(*v1alpha4.NnfSystemStorageStatus), b.(*NnfSystemStorageStatus), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1alpha3.NnfStorageProfileCmdLines)(nil), (*NnfStorageProfileCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(a.(*v1alpha3.NnfStorageProfileCmdLines), b.(*NnfStorageProfileCmdLines), scope) + if err := s.AddConversionFunc((*v1alpha4.NnfStorageProfileCmdLines)(nil), (*NnfStorageProfileCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(a.(*v1alpha4.NnfStorageProfileCmdLines), b.(*NnfStorageProfileCmdLines), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1alpha3.NnfStorageProfileLustreCmdLines)(nil), (*NnfStorageProfileLustreCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(a.(*v1alpha3.NnfStorageProfileLustreCmdLines), b.(*NnfStorageProfileLustreCmdLines), scope) + if err := s.AddConversionFunc((*v1alpha4.NnfStorageProfileLustreCmdLines)(nil), (*NnfStorageProfileLustreCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(a.(*v1alpha4.NnfStorageProfileLustreCmdLines), b.(*NnfStorageProfileLustreCmdLines), scope) }); err != nil { return err } - if err := 
s.AddConversionFunc((*v1alpha3.NnfSystemStorageSpec)(nil), (*NnfSystemStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfSystemStorageSpec_To_v1alpha1_NnfSystemStorageSpec(a.(*v1alpha3.NnfSystemStorageSpec), b.(*NnfSystemStorageSpec), scope) + if err := s.AddConversionFunc((*v1alpha4.NnfSystemStorageSpec)(nil), (*NnfSystemStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha1_NnfSystemStorageSpec(a.(*v1alpha4.NnfSystemStorageSpec), b.(*NnfSystemStorageSpec), scope) }); err != nil { return err } return nil } -func autoConvert_v1alpha1_LustreStorageSpec_To_v1alpha3_LustreStorageSpec(in *LustreStorageSpec, out *v1alpha3.LustreStorageSpec, s conversion.Scope) error { +func autoConvert_v1alpha1_LustreStorageSpec_To_v1alpha4_LustreStorageSpec(in *LustreStorageSpec, out *v1alpha4.LustreStorageSpec, s conversion.Scope) error { out.FileSystemName = in.FileSystemName out.TargetType = in.TargetType out.StartIndex = in.StartIndex @@ -885,12 +885,12 @@ func autoConvert_v1alpha1_LustreStorageSpec_To_v1alpha3_LustreStorageSpec(in *Lu return nil } -// Convert_v1alpha1_LustreStorageSpec_To_v1alpha3_LustreStorageSpec is an autogenerated conversion function. -func Convert_v1alpha1_LustreStorageSpec_To_v1alpha3_LustreStorageSpec(in *LustreStorageSpec, out *v1alpha3.LustreStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_LustreStorageSpec_To_v1alpha3_LustreStorageSpec(in, out, s) +// Convert_v1alpha1_LustreStorageSpec_To_v1alpha4_LustreStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha1_LustreStorageSpec_To_v1alpha4_LustreStorageSpec(in *LustreStorageSpec, out *v1alpha4.LustreStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_LustreStorageSpec_To_v1alpha4_LustreStorageSpec(in, out, s) } -func autoConvert_v1alpha3_LustreStorageSpec_To_v1alpha1_LustreStorageSpec(in *v1alpha3.LustreStorageSpec, out *LustreStorageSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_LustreStorageSpec_To_v1alpha1_LustreStorageSpec(in *v1alpha4.LustreStorageSpec, out *LustreStorageSpec, s conversion.Scope) error { out.FileSystemName = in.FileSystemName out.TargetType = in.TargetType out.StartIndex = in.StartIndex @@ -899,66 +899,66 @@ func autoConvert_v1alpha3_LustreStorageSpec_To_v1alpha1_LustreStorageSpec(in *v1 return nil } -// Convert_v1alpha3_LustreStorageSpec_To_v1alpha1_LustreStorageSpec is an autogenerated conversion function. -func Convert_v1alpha3_LustreStorageSpec_To_v1alpha1_LustreStorageSpec(in *v1alpha3.LustreStorageSpec, out *LustreStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_LustreStorageSpec_To_v1alpha1_LustreStorageSpec(in, out, s) +// Convert_v1alpha4_LustreStorageSpec_To_v1alpha1_LustreStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_LustreStorageSpec_To_v1alpha1_LustreStorageSpec(in *v1alpha4.LustreStorageSpec, out *LustreStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_LustreStorageSpec_To_v1alpha1_LustreStorageSpec(in, out, s) } -func autoConvert_v1alpha1_NnfAccess_To_v1alpha3_NnfAccess(in *NnfAccess, out *v1alpha3.NnfAccess, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfAccess_To_v1alpha4_NnfAccess(in *NnfAccess, out *v1alpha4.NnfAccess, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha1_NnfAccessSpec_To_v1alpha4_NnfAccessSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha1_NnfAccessStatus_To_v1alpha3_NnfAccessStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha1_NnfAccessStatus_To_v1alpha4_NnfAccessStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha1_NnfAccess_To_v1alpha3_NnfAccess is an autogenerated conversion function. -func Convert_v1alpha1_NnfAccess_To_v1alpha3_NnfAccess(in *NnfAccess, out *v1alpha3.NnfAccess, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfAccess_To_v1alpha3_NnfAccess(in, out, s) +// Convert_v1alpha1_NnfAccess_To_v1alpha4_NnfAccess is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfAccess_To_v1alpha4_NnfAccess(in *NnfAccess, out *v1alpha4.NnfAccess, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfAccess_To_v1alpha4_NnfAccess(in, out, s) } -func autoConvert_v1alpha3_NnfAccess_To_v1alpha1_NnfAccess(in *v1alpha3.NnfAccess, out *NnfAccess, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfAccess_To_v1alpha1_NnfAccess(in *v1alpha4.NnfAccess, out *NnfAccess, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfAccessSpec_To_v1alpha1_NnfAccessSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha4_NnfAccessSpec_To_v1alpha1_NnfAccessSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha3_NnfAccessStatus_To_v1alpha1_NnfAccessStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha4_NnfAccessStatus_To_v1alpha1_NnfAccessStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfAccess_To_v1alpha1_NnfAccess is an autogenerated conversion function. -func Convert_v1alpha3_NnfAccess_To_v1alpha1_NnfAccess(in *v1alpha3.NnfAccess, out *NnfAccess, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfAccess_To_v1alpha1_NnfAccess(in, out, s) +// Convert_v1alpha4_NnfAccess_To_v1alpha1_NnfAccess is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfAccess_To_v1alpha1_NnfAccess(in *v1alpha4.NnfAccess, out *NnfAccess, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfAccess_To_v1alpha1_NnfAccess(in, out, s) } -func autoConvert_v1alpha1_NnfAccessList_To_v1alpha3_NnfAccessList(in *NnfAccessList, out *v1alpha3.NnfAccessList, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfAccessList_To_v1alpha4_NnfAccessList(in *NnfAccessList, out *v1alpha4.NnfAccessList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha3.NnfAccess)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]v1alpha4.NnfAccess)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha1_NnfAccessList_To_v1alpha3_NnfAccessList is an autogenerated conversion function. -func Convert_v1alpha1_NnfAccessList_To_v1alpha3_NnfAccessList(in *NnfAccessList, out *v1alpha3.NnfAccessList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfAccessList_To_v1alpha3_NnfAccessList(in, out, s) +// Convert_v1alpha1_NnfAccessList_To_v1alpha4_NnfAccessList is an autogenerated conversion function. +func Convert_v1alpha1_NnfAccessList_To_v1alpha4_NnfAccessList(in *NnfAccessList, out *v1alpha4.NnfAccessList, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfAccessList_To_v1alpha4_NnfAccessList(in, out, s) } -func autoConvert_v1alpha3_NnfAccessList_To_v1alpha1_NnfAccessList(in *v1alpha3.NnfAccessList, out *NnfAccessList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfAccessList_To_v1alpha1_NnfAccessList(in *v1alpha4.NnfAccessList, out *NnfAccessList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]NnfAccess)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha3_NnfAccessList_To_v1alpha1_NnfAccessList is an autogenerated conversion function. 
-func Convert_v1alpha3_NnfAccessList_To_v1alpha1_NnfAccessList(in *v1alpha3.NnfAccessList, out *NnfAccessList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfAccessList_To_v1alpha1_NnfAccessList(in, out, s) +// Convert_v1alpha4_NnfAccessList_To_v1alpha1_NnfAccessList is an autogenerated conversion function. +func Convert_v1alpha4_NnfAccessList_To_v1alpha1_NnfAccessList(in *v1alpha4.NnfAccessList, out *NnfAccessList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfAccessList_To_v1alpha1_NnfAccessList(in, out, s) } -func autoConvert_v1alpha1_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(in *NnfAccessSpec, out *v1alpha3.NnfAccessSpec, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfAccessSpec_To_v1alpha4_NnfAccessSpec(in *NnfAccessSpec, out *v1alpha4.NnfAccessSpec, s conversion.Scope) error { out.DesiredState = in.DesiredState out.TeardownState = v1alpha2.WorkflowState(in.TeardownState) out.Target = in.Target @@ -972,12 +972,12 @@ func autoConvert_v1alpha1_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(in *NnfAccessS return nil } -// Convert_v1alpha1_NnfAccessSpec_To_v1alpha3_NnfAccessSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(in *NnfAccessSpec, out *v1alpha3.NnfAccessSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(in, out, s) +// Convert_v1alpha1_NnfAccessSpec_To_v1alpha4_NnfAccessSpec is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfAccessSpec_To_v1alpha4_NnfAccessSpec(in *NnfAccessSpec, out *v1alpha4.NnfAccessSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfAccessSpec_To_v1alpha4_NnfAccessSpec(in, out, s) } -func autoConvert_v1alpha3_NnfAccessSpec_To_v1alpha1_NnfAccessSpec(in *v1alpha3.NnfAccessSpec, out *NnfAccessSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfAccessSpec_To_v1alpha1_NnfAccessSpec(in *v1alpha4.NnfAccessSpec, out *NnfAccessSpec, s conversion.Scope) error { out.DesiredState = in.DesiredState out.TeardownState = v1alpha2.WorkflowState(in.TeardownState) out.Target = in.Target @@ -991,64 +991,64 @@ func autoConvert_v1alpha3_NnfAccessSpec_To_v1alpha1_NnfAccessSpec(in *v1alpha3.N return nil } -// Convert_v1alpha3_NnfAccessSpec_To_v1alpha1_NnfAccessSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfAccessSpec_To_v1alpha1_NnfAccessSpec(in *v1alpha3.NnfAccessSpec, out *NnfAccessSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfAccessSpec_To_v1alpha1_NnfAccessSpec(in, out, s) +// Convert_v1alpha4_NnfAccessSpec_To_v1alpha1_NnfAccessSpec is an autogenerated conversion function. +func Convert_v1alpha4_NnfAccessSpec_To_v1alpha1_NnfAccessSpec(in *v1alpha4.NnfAccessSpec, out *NnfAccessSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfAccessSpec_To_v1alpha1_NnfAccessSpec(in, out, s) } -func autoConvert_v1alpha1_NnfAccessStatus_To_v1alpha3_NnfAccessStatus(in *NnfAccessStatus, out *v1alpha3.NnfAccessStatus, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfAccessStatus_To_v1alpha4_NnfAccessStatus(in *NnfAccessStatus, out *v1alpha4.NnfAccessStatus, s conversion.Scope) error { out.State = in.State out.Ready = in.Ready out.ResourceError = in.ResourceError return nil } -// Convert_v1alpha1_NnfAccessStatus_To_v1alpha3_NnfAccessStatus is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfAccessStatus_To_v1alpha3_NnfAccessStatus(in *NnfAccessStatus, out *v1alpha3.NnfAccessStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfAccessStatus_To_v1alpha3_NnfAccessStatus(in, out, s) +// Convert_v1alpha1_NnfAccessStatus_To_v1alpha4_NnfAccessStatus is an autogenerated conversion function. +func Convert_v1alpha1_NnfAccessStatus_To_v1alpha4_NnfAccessStatus(in *NnfAccessStatus, out *v1alpha4.NnfAccessStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfAccessStatus_To_v1alpha4_NnfAccessStatus(in, out, s) } -func autoConvert_v1alpha3_NnfAccessStatus_To_v1alpha1_NnfAccessStatus(in *v1alpha3.NnfAccessStatus, out *NnfAccessStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfAccessStatus_To_v1alpha1_NnfAccessStatus(in *v1alpha4.NnfAccessStatus, out *NnfAccessStatus, s conversion.Scope) error { out.State = in.State out.Ready = in.Ready out.ResourceError = in.ResourceError return nil } -// Convert_v1alpha3_NnfAccessStatus_To_v1alpha1_NnfAccessStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfAccessStatus_To_v1alpha1_NnfAccessStatus(in *v1alpha3.NnfAccessStatus, out *NnfAccessStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfAccessStatus_To_v1alpha1_NnfAccessStatus(in, out, s) +// Convert_v1alpha4_NnfAccessStatus_To_v1alpha1_NnfAccessStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfAccessStatus_To_v1alpha1_NnfAccessStatus(in *v1alpha4.NnfAccessStatus, out *NnfAccessStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfAccessStatus_To_v1alpha1_NnfAccessStatus(in, out, s) } -func autoConvert_v1alpha1_NnfContainerProfile_To_v1alpha3_NnfContainerProfile(in *NnfContainerProfile, out *v1alpha3.NnfContainerProfile, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfContainerProfile_To_v1alpha4_NnfContainerProfile(in *NnfContainerProfile, out *v1alpha4.NnfContainerProfile, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfContainerProfileData_To_v1alpha3_NnfContainerProfileData(&in.Data, &out.Data, s); err != nil { + if err := Convert_v1alpha1_NnfContainerProfileData_To_v1alpha4_NnfContainerProfileData(&in.Data, &out.Data, s); err != nil { return err } return nil } -// Convert_v1alpha1_NnfContainerProfile_To_v1alpha3_NnfContainerProfile is an autogenerated conversion function. -func Convert_v1alpha1_NnfContainerProfile_To_v1alpha3_NnfContainerProfile(in *NnfContainerProfile, out *v1alpha3.NnfContainerProfile, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfContainerProfile_To_v1alpha3_NnfContainerProfile(in, out, s) +// Convert_v1alpha1_NnfContainerProfile_To_v1alpha4_NnfContainerProfile is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfContainerProfile_To_v1alpha4_NnfContainerProfile(in *NnfContainerProfile, out *v1alpha4.NnfContainerProfile, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfContainerProfile_To_v1alpha4_NnfContainerProfile(in, out, s) } -func autoConvert_v1alpha3_NnfContainerProfile_To_v1alpha1_NnfContainerProfile(in *v1alpha3.NnfContainerProfile, out *NnfContainerProfile, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfContainerProfile_To_v1alpha1_NnfContainerProfile(in *v1alpha4.NnfContainerProfile, out *NnfContainerProfile, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfContainerProfileData_To_v1alpha1_NnfContainerProfileData(&in.Data, &out.Data, s); err != nil { + if err := Convert_v1alpha4_NnfContainerProfileData_To_v1alpha1_NnfContainerProfileData(&in.Data, &out.Data, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfContainerProfile_To_v1alpha1_NnfContainerProfile is an autogenerated conversion function. -func Convert_v1alpha3_NnfContainerProfile_To_v1alpha1_NnfContainerProfile(in *v1alpha3.NnfContainerProfile, out *NnfContainerProfile, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfContainerProfile_To_v1alpha1_NnfContainerProfile(in, out, s) +// Convert_v1alpha4_NnfContainerProfile_To_v1alpha1_NnfContainerProfile is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfContainerProfile_To_v1alpha1_NnfContainerProfile(in *v1alpha4.NnfContainerProfile, out *NnfContainerProfile, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfContainerProfile_To_v1alpha1_NnfContainerProfile(in, out, s) } -func autoConvert_v1alpha1_NnfContainerProfileData_To_v1alpha3_NnfContainerProfileData(in *NnfContainerProfileData, out *v1alpha3.NnfContainerProfileData, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfContainerProfileData_To_v1alpha4_NnfContainerProfileData(in *NnfContainerProfileData, out *v1alpha4.NnfContainerProfileData, s conversion.Scope) error { out.Pinned = in.Pinned - out.Storages = *(*[]v1alpha3.NnfContainerProfileStorage)(unsafe.Pointer(&in.Storages)) + out.Storages = *(*[]v1alpha4.NnfContainerProfileStorage)(unsafe.Pointer(&in.Storages)) out.PreRunTimeoutSeconds = (*int64)(unsafe.Pointer(in.PreRunTimeoutSeconds)) out.PostRunTimeoutSeconds = (*int64)(unsafe.Pointer(in.PostRunTimeoutSeconds)) out.RetryLimit = in.RetryLimit @@ -1060,12 +1060,12 @@ func autoConvert_v1alpha1_NnfContainerProfileData_To_v1alpha3_NnfContainerProfil return nil } -// Convert_v1alpha1_NnfContainerProfileData_To_v1alpha3_NnfContainerProfileData is an autogenerated conversion function. -func Convert_v1alpha1_NnfContainerProfileData_To_v1alpha3_NnfContainerProfileData(in *NnfContainerProfileData, out *v1alpha3.NnfContainerProfileData, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfContainerProfileData_To_v1alpha3_NnfContainerProfileData(in, out, s) +// Convert_v1alpha1_NnfContainerProfileData_To_v1alpha4_NnfContainerProfileData is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfContainerProfileData_To_v1alpha4_NnfContainerProfileData(in *NnfContainerProfileData, out *v1alpha4.NnfContainerProfileData, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfContainerProfileData_To_v1alpha4_NnfContainerProfileData(in, out, s) } -func autoConvert_v1alpha3_NnfContainerProfileData_To_v1alpha1_NnfContainerProfileData(in *v1alpha3.NnfContainerProfileData, out *NnfContainerProfileData, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfContainerProfileData_To_v1alpha1_NnfContainerProfileData(in *v1alpha4.NnfContainerProfileData, out *NnfContainerProfileData, s conversion.Scope) error { out.Pinned = in.Pinned out.Storages = *(*[]NnfContainerProfileStorage)(unsafe.Pointer(&in.Storages)) out.PreRunTimeoutSeconds = (*int64)(unsafe.Pointer(in.PreRunTimeoutSeconds)) @@ -1079,90 +1079,90 @@ func autoConvert_v1alpha3_NnfContainerProfileData_To_v1alpha1_NnfContainerProfil return nil } -// Convert_v1alpha3_NnfContainerProfileData_To_v1alpha1_NnfContainerProfileData is an autogenerated conversion function. -func Convert_v1alpha3_NnfContainerProfileData_To_v1alpha1_NnfContainerProfileData(in *v1alpha3.NnfContainerProfileData, out *NnfContainerProfileData, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfContainerProfileData_To_v1alpha1_NnfContainerProfileData(in, out, s) +// Convert_v1alpha4_NnfContainerProfileData_To_v1alpha1_NnfContainerProfileData is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfContainerProfileData_To_v1alpha1_NnfContainerProfileData(in *v1alpha4.NnfContainerProfileData, out *NnfContainerProfileData, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfContainerProfileData_To_v1alpha1_NnfContainerProfileData(in, out, s) } -func autoConvert_v1alpha1_NnfContainerProfileList_To_v1alpha3_NnfContainerProfileList(in *NnfContainerProfileList, out *v1alpha3.NnfContainerProfileList, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfContainerProfileList_To_v1alpha4_NnfContainerProfileList(in *NnfContainerProfileList, out *v1alpha4.NnfContainerProfileList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha3.NnfContainerProfile)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]v1alpha4.NnfContainerProfile)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha1_NnfContainerProfileList_To_v1alpha3_NnfContainerProfileList is an autogenerated conversion function. -func Convert_v1alpha1_NnfContainerProfileList_To_v1alpha3_NnfContainerProfileList(in *NnfContainerProfileList, out *v1alpha3.NnfContainerProfileList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfContainerProfileList_To_v1alpha3_NnfContainerProfileList(in, out, s) +// Convert_v1alpha1_NnfContainerProfileList_To_v1alpha4_NnfContainerProfileList is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfContainerProfileList_To_v1alpha4_NnfContainerProfileList(in *NnfContainerProfileList, out *v1alpha4.NnfContainerProfileList, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfContainerProfileList_To_v1alpha4_NnfContainerProfileList(in, out, s) } -func autoConvert_v1alpha3_NnfContainerProfileList_To_v1alpha1_NnfContainerProfileList(in *v1alpha3.NnfContainerProfileList, out *NnfContainerProfileList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfContainerProfileList_To_v1alpha1_NnfContainerProfileList(in *v1alpha4.NnfContainerProfileList, out *NnfContainerProfileList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]NnfContainerProfile)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha3_NnfContainerProfileList_To_v1alpha1_NnfContainerProfileList is an autogenerated conversion function. -func Convert_v1alpha3_NnfContainerProfileList_To_v1alpha1_NnfContainerProfileList(in *v1alpha3.NnfContainerProfileList, out *NnfContainerProfileList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfContainerProfileList_To_v1alpha1_NnfContainerProfileList(in, out, s) +// Convert_v1alpha4_NnfContainerProfileList_To_v1alpha1_NnfContainerProfileList is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfContainerProfileList_To_v1alpha1_NnfContainerProfileList(in *v1alpha4.NnfContainerProfileList, out *NnfContainerProfileList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfContainerProfileList_To_v1alpha1_NnfContainerProfileList(in, out, s) } -func autoConvert_v1alpha1_NnfContainerProfileStorage_To_v1alpha3_NnfContainerProfileStorage(in *NnfContainerProfileStorage, out *v1alpha3.NnfContainerProfileStorage, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfContainerProfileStorage_To_v1alpha4_NnfContainerProfileStorage(in *NnfContainerProfileStorage, out *v1alpha4.NnfContainerProfileStorage, s conversion.Scope) error { out.Name = in.Name out.Optional = in.Optional out.PVCMode = v1.PersistentVolumeAccessMode(in.PVCMode) return nil } -// Convert_v1alpha1_NnfContainerProfileStorage_To_v1alpha3_NnfContainerProfileStorage is an autogenerated conversion function. -func Convert_v1alpha1_NnfContainerProfileStorage_To_v1alpha3_NnfContainerProfileStorage(in *NnfContainerProfileStorage, out *v1alpha3.NnfContainerProfileStorage, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfContainerProfileStorage_To_v1alpha3_NnfContainerProfileStorage(in, out, s) +// Convert_v1alpha1_NnfContainerProfileStorage_To_v1alpha4_NnfContainerProfileStorage is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfContainerProfileStorage_To_v1alpha4_NnfContainerProfileStorage(in *NnfContainerProfileStorage, out *v1alpha4.NnfContainerProfileStorage, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfContainerProfileStorage_To_v1alpha4_NnfContainerProfileStorage(in, out, s) } -func autoConvert_v1alpha3_NnfContainerProfileStorage_To_v1alpha1_NnfContainerProfileStorage(in *v1alpha3.NnfContainerProfileStorage, out *NnfContainerProfileStorage, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfContainerProfileStorage_To_v1alpha1_NnfContainerProfileStorage(in *v1alpha4.NnfContainerProfileStorage, out *NnfContainerProfileStorage, s conversion.Scope) error { out.Name = in.Name out.Optional = in.Optional out.PVCMode = v1.PersistentVolumeAccessMode(in.PVCMode) return nil } -// Convert_v1alpha3_NnfContainerProfileStorage_To_v1alpha1_NnfContainerProfileStorage is an autogenerated conversion function. -func Convert_v1alpha3_NnfContainerProfileStorage_To_v1alpha1_NnfContainerProfileStorage(in *v1alpha3.NnfContainerProfileStorage, out *NnfContainerProfileStorage, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfContainerProfileStorage_To_v1alpha1_NnfContainerProfileStorage(in, out, s) +// Convert_v1alpha4_NnfContainerProfileStorage_To_v1alpha1_NnfContainerProfileStorage is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfContainerProfileStorage_To_v1alpha1_NnfContainerProfileStorage(in *v1alpha4.NnfContainerProfileStorage, out *NnfContainerProfileStorage, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfContainerProfileStorage_To_v1alpha1_NnfContainerProfileStorage(in, out, s) } -func autoConvert_v1alpha1_NnfDataMovement_To_v1alpha3_NnfDataMovement(in *NnfDataMovement, out *v1alpha3.NnfDataMovement, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfDataMovement_To_v1alpha4_NnfDataMovement(in *NnfDataMovement, out *v1alpha4.NnfDataMovement, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfDataMovementSpec_To_v1alpha3_NnfDataMovementSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha1_NnfDataMovementSpec_To_v1alpha4_NnfDataMovementSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha1_NnfDataMovementStatus_To_v1alpha3_NnfDataMovementStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha1_NnfDataMovementStatus_To_v1alpha4_NnfDataMovementStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha1_NnfDataMovement_To_v1alpha3_NnfDataMovement is an autogenerated conversion function. -func Convert_v1alpha1_NnfDataMovement_To_v1alpha3_NnfDataMovement(in *NnfDataMovement, out *v1alpha3.NnfDataMovement, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovement_To_v1alpha3_NnfDataMovement(in, out, s) +// Convert_v1alpha1_NnfDataMovement_To_v1alpha4_NnfDataMovement is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfDataMovement_To_v1alpha4_NnfDataMovement(in *NnfDataMovement, out *v1alpha4.NnfDataMovement, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovement_To_v1alpha4_NnfDataMovement(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovement_To_v1alpha1_NnfDataMovement(in *v1alpha3.NnfDataMovement, out *NnfDataMovement, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovement_To_v1alpha1_NnfDataMovement(in *v1alpha4.NnfDataMovement, out *NnfDataMovement, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfDataMovementSpec_To_v1alpha1_NnfDataMovementSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha4_NnfDataMovementSpec_To_v1alpha1_NnfDataMovementSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha3_NnfDataMovementStatus_To_v1alpha1_NnfDataMovementStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha4_NnfDataMovementStatus_To_v1alpha1_NnfDataMovementStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfDataMovement_To_v1alpha1_NnfDataMovement is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovement_To_v1alpha1_NnfDataMovement(in *v1alpha3.NnfDataMovement, out *NnfDataMovement, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovement_To_v1alpha1_NnfDataMovement(in, out, s) +// Convert_v1alpha4_NnfDataMovement_To_v1alpha1_NnfDataMovement is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovement_To_v1alpha1_NnfDataMovement(in *v1alpha4.NnfDataMovement, out *NnfDataMovement, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovement_To_v1alpha1_NnfDataMovement(in, out, s) } -func autoConvert_v1alpha1_NnfDataMovementCommandStatus_To_v1alpha3_NnfDataMovementCommandStatus(in *NnfDataMovementCommandStatus, out *v1alpha3.NnfDataMovementCommandStatus, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfDataMovementCommandStatus_To_v1alpha4_NnfDataMovementCommandStatus(in *NnfDataMovementCommandStatus, out *v1alpha4.NnfDataMovementCommandStatus, s conversion.Scope) error { out.Command = in.Command out.ElapsedTime = in.ElapsedTime out.ProgressPercentage = (*int32)(unsafe.Pointer(in.ProgressPercentage)) @@ -1178,12 +1178,12 @@ func autoConvert_v1alpha1_NnfDataMovementCommandStatus_To_v1alpha3_NnfDataMoveme return nil } -// Convert_v1alpha1_NnfDataMovementCommandStatus_To_v1alpha3_NnfDataMovementCommandStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfDataMovementCommandStatus_To_v1alpha3_NnfDataMovementCommandStatus(in *NnfDataMovementCommandStatus, out *v1alpha3.NnfDataMovementCommandStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovementCommandStatus_To_v1alpha3_NnfDataMovementCommandStatus(in, out, s) +// Convert_v1alpha1_NnfDataMovementCommandStatus_To_v1alpha4_NnfDataMovementCommandStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfDataMovementCommandStatus_To_v1alpha4_NnfDataMovementCommandStatus(in *NnfDataMovementCommandStatus, out *v1alpha4.NnfDataMovementCommandStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovementCommandStatus_To_v1alpha4_NnfDataMovementCommandStatus(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovementCommandStatus_To_v1alpha1_NnfDataMovementCommandStatus(in *v1alpha3.NnfDataMovementCommandStatus, out *NnfDataMovementCommandStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovementCommandStatus_To_v1alpha1_NnfDataMovementCommandStatus(in *v1alpha4.NnfDataMovementCommandStatus, out *NnfDataMovementCommandStatus, s conversion.Scope) error { out.Command = in.Command out.ElapsedTime = in.ElapsedTime out.ProgressPercentage = (*int32)(unsafe.Pointer(in.ProgressPercentage)) @@ -1199,12 +1199,12 @@ func autoConvert_v1alpha3_NnfDataMovementCommandStatus_To_v1alpha1_NnfDataMoveme return nil } -// Convert_v1alpha3_NnfDataMovementCommandStatus_To_v1alpha1_NnfDataMovementCommandStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovementCommandStatus_To_v1alpha1_NnfDataMovementCommandStatus(in *v1alpha3.NnfDataMovementCommandStatus, out *NnfDataMovementCommandStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovementCommandStatus_To_v1alpha1_NnfDataMovementCommandStatus(in, out, s) +// Convert_v1alpha4_NnfDataMovementCommandStatus_To_v1alpha1_NnfDataMovementCommandStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementCommandStatus_To_v1alpha1_NnfDataMovementCommandStatus(in *v1alpha4.NnfDataMovementCommandStatus, out *NnfDataMovementCommandStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementCommandStatus_To_v1alpha1_NnfDataMovementCommandStatus(in, out, s) } -func autoConvert_v1alpha1_NnfDataMovementConfig_To_v1alpha3_NnfDataMovementConfig(in *NnfDataMovementConfig, out *v1alpha3.NnfDataMovementConfig, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfDataMovementConfig_To_v1alpha4_NnfDataMovementConfig(in *NnfDataMovementConfig, out *v1alpha4.NnfDataMovementConfig, s conversion.Scope) error { out.Dryrun = in.Dryrun out.MpirunOptions = in.MpirunOptions out.DcpOptions = in.DcpOptions @@ -1215,12 +1215,12 @@ func autoConvert_v1alpha1_NnfDataMovementConfig_To_v1alpha3_NnfDataMovementConfi return nil } -// Convert_v1alpha1_NnfDataMovementConfig_To_v1alpha3_NnfDataMovementConfig is an autogenerated conversion function. -func Convert_v1alpha1_NnfDataMovementConfig_To_v1alpha3_NnfDataMovementConfig(in *NnfDataMovementConfig, out *v1alpha3.NnfDataMovementConfig, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovementConfig_To_v1alpha3_NnfDataMovementConfig(in, out, s) +// Convert_v1alpha1_NnfDataMovementConfig_To_v1alpha4_NnfDataMovementConfig is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfDataMovementConfig_To_v1alpha4_NnfDataMovementConfig(in *NnfDataMovementConfig, out *v1alpha4.NnfDataMovementConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovementConfig_To_v1alpha4_NnfDataMovementConfig(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovementConfig_To_v1alpha1_NnfDataMovementConfig(in *v1alpha3.NnfDataMovementConfig, out *NnfDataMovementConfig, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovementConfig_To_v1alpha1_NnfDataMovementConfig(in *v1alpha4.NnfDataMovementConfig, out *NnfDataMovementConfig, s conversion.Scope) error { out.Dryrun = in.Dryrun out.MpirunOptions = in.MpirunOptions out.DcpOptions = in.DcpOptions @@ -1231,88 +1231,88 @@ func autoConvert_v1alpha3_NnfDataMovementConfig_To_v1alpha1_NnfDataMovementConfi return nil } -// Convert_v1alpha3_NnfDataMovementConfig_To_v1alpha1_NnfDataMovementConfig is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovementConfig_To_v1alpha1_NnfDataMovementConfig(in *v1alpha3.NnfDataMovementConfig, out *NnfDataMovementConfig, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovementConfig_To_v1alpha1_NnfDataMovementConfig(in, out, s) +// Convert_v1alpha4_NnfDataMovementConfig_To_v1alpha1_NnfDataMovementConfig is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementConfig_To_v1alpha1_NnfDataMovementConfig(in *v1alpha4.NnfDataMovementConfig, out *NnfDataMovementConfig, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementConfig_To_v1alpha1_NnfDataMovementConfig(in, out, s) } -func autoConvert_v1alpha1_NnfDataMovementList_To_v1alpha3_NnfDataMovementList(in *NnfDataMovementList, out *v1alpha3.NnfDataMovementList, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfDataMovementList_To_v1alpha4_NnfDataMovementList(in *NnfDataMovementList, out *v1alpha4.NnfDataMovementList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha3.NnfDataMovement)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]v1alpha4.NnfDataMovement)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha1_NnfDataMovementList_To_v1alpha3_NnfDataMovementList is an autogenerated conversion function. -func Convert_v1alpha1_NnfDataMovementList_To_v1alpha3_NnfDataMovementList(in *NnfDataMovementList, out *v1alpha3.NnfDataMovementList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovementList_To_v1alpha3_NnfDataMovementList(in, out, s) +// Convert_v1alpha1_NnfDataMovementList_To_v1alpha4_NnfDataMovementList is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfDataMovementList_To_v1alpha4_NnfDataMovementList(in *NnfDataMovementList, out *v1alpha4.NnfDataMovementList, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovementList_To_v1alpha4_NnfDataMovementList(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovementList_To_v1alpha1_NnfDataMovementList(in *v1alpha3.NnfDataMovementList, out *NnfDataMovementList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovementList_To_v1alpha1_NnfDataMovementList(in *v1alpha4.NnfDataMovementList, out *NnfDataMovementList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]NnfDataMovement)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha3_NnfDataMovementList_To_v1alpha1_NnfDataMovementList is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovementList_To_v1alpha1_NnfDataMovementList(in *v1alpha3.NnfDataMovementList, out *NnfDataMovementList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovementList_To_v1alpha1_NnfDataMovementList(in, out, s) +// Convert_v1alpha4_NnfDataMovementList_To_v1alpha1_NnfDataMovementList is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementList_To_v1alpha1_NnfDataMovementList(in *v1alpha4.NnfDataMovementList, out *NnfDataMovementList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementList_To_v1alpha1_NnfDataMovementList(in, out, s) } -func autoConvert_v1alpha1_NnfDataMovementManager_To_v1alpha3_NnfDataMovementManager(in *NnfDataMovementManager, out *v1alpha3.NnfDataMovementManager, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfDataMovementManager_To_v1alpha4_NnfDataMovementManager(in *NnfDataMovementManager, out *v1alpha4.NnfDataMovementManager, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfDataMovementManagerSpec_To_v1alpha3_NnfDataMovementManagerSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha1_NnfDataMovementManagerSpec_To_v1alpha4_NnfDataMovementManagerSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha1_NnfDataMovementManagerStatus_To_v1alpha3_NnfDataMovementManagerStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha1_NnfDataMovementManagerStatus_To_v1alpha4_NnfDataMovementManagerStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha1_NnfDataMovementManager_To_v1alpha3_NnfDataMovementManager is an autogenerated conversion function. -func Convert_v1alpha1_NnfDataMovementManager_To_v1alpha3_NnfDataMovementManager(in *NnfDataMovementManager, out *v1alpha3.NnfDataMovementManager, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovementManager_To_v1alpha3_NnfDataMovementManager(in, out, s) +// Convert_v1alpha1_NnfDataMovementManager_To_v1alpha4_NnfDataMovementManager is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfDataMovementManager_To_v1alpha4_NnfDataMovementManager(in *NnfDataMovementManager, out *v1alpha4.NnfDataMovementManager, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovementManager_To_v1alpha4_NnfDataMovementManager(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovementManager_To_v1alpha1_NnfDataMovementManager(in *v1alpha3.NnfDataMovementManager, out *NnfDataMovementManager, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovementManager_To_v1alpha1_NnfDataMovementManager(in *v1alpha4.NnfDataMovementManager, out *NnfDataMovementManager, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfDataMovementManagerSpec_To_v1alpha1_NnfDataMovementManagerSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha4_NnfDataMovementManagerSpec_To_v1alpha1_NnfDataMovementManagerSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha3_NnfDataMovementManagerStatus_To_v1alpha1_NnfDataMovementManagerStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha4_NnfDataMovementManagerStatus_To_v1alpha1_NnfDataMovementManagerStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfDataMovementManager_To_v1alpha1_NnfDataMovementManager is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovementManager_To_v1alpha1_NnfDataMovementManager(in *v1alpha3.NnfDataMovementManager, out *NnfDataMovementManager, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovementManager_To_v1alpha1_NnfDataMovementManager(in, out, s) +// Convert_v1alpha4_NnfDataMovementManager_To_v1alpha1_NnfDataMovementManager is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementManager_To_v1alpha1_NnfDataMovementManager(in *v1alpha4.NnfDataMovementManager, out *NnfDataMovementManager, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementManager_To_v1alpha1_NnfDataMovementManager(in, out, s) } -func autoConvert_v1alpha1_NnfDataMovementManagerList_To_v1alpha3_NnfDataMovementManagerList(in *NnfDataMovementManagerList, out *v1alpha3.NnfDataMovementManagerList, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfDataMovementManagerList_To_v1alpha4_NnfDataMovementManagerList(in *NnfDataMovementManagerList, out *v1alpha4.NnfDataMovementManagerList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha3.NnfDataMovementManager)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]v1alpha4.NnfDataMovementManager)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha1_NnfDataMovementManagerList_To_v1alpha3_NnfDataMovementManagerList is an autogenerated conversion function. -func Convert_v1alpha1_NnfDataMovementManagerList_To_v1alpha3_NnfDataMovementManagerList(in *NnfDataMovementManagerList, out *v1alpha3.NnfDataMovementManagerList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovementManagerList_To_v1alpha3_NnfDataMovementManagerList(in, out, s) +// Convert_v1alpha1_NnfDataMovementManagerList_To_v1alpha4_NnfDataMovementManagerList is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfDataMovementManagerList_To_v1alpha4_NnfDataMovementManagerList(in *NnfDataMovementManagerList, out *v1alpha4.NnfDataMovementManagerList, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovementManagerList_To_v1alpha4_NnfDataMovementManagerList(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovementManagerList_To_v1alpha1_NnfDataMovementManagerList(in *v1alpha3.NnfDataMovementManagerList, out *NnfDataMovementManagerList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovementManagerList_To_v1alpha1_NnfDataMovementManagerList(in *v1alpha4.NnfDataMovementManagerList, out *NnfDataMovementManagerList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]NnfDataMovementManager)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha3_NnfDataMovementManagerList_To_v1alpha1_NnfDataMovementManagerList is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovementManagerList_To_v1alpha1_NnfDataMovementManagerList(in *v1alpha3.NnfDataMovementManagerList, out *NnfDataMovementManagerList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovementManagerList_To_v1alpha1_NnfDataMovementManagerList(in, out, s) +// Convert_v1alpha4_NnfDataMovementManagerList_To_v1alpha1_NnfDataMovementManagerList is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementManagerList_To_v1alpha1_NnfDataMovementManagerList(in *v1alpha4.NnfDataMovementManagerList, out *NnfDataMovementManagerList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementManagerList_To_v1alpha1_NnfDataMovementManagerList(in, out, s) } -func autoConvert_v1alpha1_NnfDataMovementManagerSpec_To_v1alpha3_NnfDataMovementManagerSpec(in *NnfDataMovementManagerSpec, out *v1alpha3.NnfDataMovementManagerSpec, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfDataMovementManagerSpec_To_v1alpha4_NnfDataMovementManagerSpec(in *NnfDataMovementManagerSpec, out *v1alpha4.NnfDataMovementManagerSpec, s conversion.Scope) error { out.Selector = in.Selector out.Template = in.Template out.UpdateStrategy = in.UpdateStrategy @@ -1321,12 +1321,12 @@ func autoConvert_v1alpha1_NnfDataMovementManagerSpec_To_v1alpha3_NnfDataMovement return nil } -// Convert_v1alpha1_NnfDataMovementManagerSpec_To_v1alpha3_NnfDataMovementManagerSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfDataMovementManagerSpec_To_v1alpha3_NnfDataMovementManagerSpec(in *NnfDataMovementManagerSpec, out *v1alpha3.NnfDataMovementManagerSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovementManagerSpec_To_v1alpha3_NnfDataMovementManagerSpec(in, out, s) +// Convert_v1alpha1_NnfDataMovementManagerSpec_To_v1alpha4_NnfDataMovementManagerSpec is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfDataMovementManagerSpec_To_v1alpha4_NnfDataMovementManagerSpec(in *NnfDataMovementManagerSpec, out *v1alpha4.NnfDataMovementManagerSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovementManagerSpec_To_v1alpha4_NnfDataMovementManagerSpec(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovementManagerSpec_To_v1alpha1_NnfDataMovementManagerSpec(in *v1alpha3.NnfDataMovementManagerSpec, out *NnfDataMovementManagerSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovementManagerSpec_To_v1alpha1_NnfDataMovementManagerSpec(in *v1alpha4.NnfDataMovementManagerSpec, out *NnfDataMovementManagerSpec, s conversion.Scope) error { out.Selector = in.Selector out.Template = in.Template out.UpdateStrategy = in.UpdateStrategy @@ -1335,58 +1335,58 @@ func autoConvert_v1alpha3_NnfDataMovementManagerSpec_To_v1alpha1_NnfDataMovement return nil } -// Convert_v1alpha3_NnfDataMovementManagerSpec_To_v1alpha1_NnfDataMovementManagerSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovementManagerSpec_To_v1alpha1_NnfDataMovementManagerSpec(in *v1alpha3.NnfDataMovementManagerSpec, out *NnfDataMovementManagerSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovementManagerSpec_To_v1alpha1_NnfDataMovementManagerSpec(in, out, s) +// Convert_v1alpha4_NnfDataMovementManagerSpec_To_v1alpha1_NnfDataMovementManagerSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementManagerSpec_To_v1alpha1_NnfDataMovementManagerSpec(in *v1alpha4.NnfDataMovementManagerSpec, out *NnfDataMovementManagerSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementManagerSpec_To_v1alpha1_NnfDataMovementManagerSpec(in, out, s) } -func autoConvert_v1alpha1_NnfDataMovementManagerStatus_To_v1alpha3_NnfDataMovementManagerStatus(in *NnfDataMovementManagerStatus, out *v1alpha3.NnfDataMovementManagerStatus, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfDataMovementManagerStatus_To_v1alpha4_NnfDataMovementManagerStatus(in *NnfDataMovementManagerStatus, out *v1alpha4.NnfDataMovementManagerStatus, s conversion.Scope) error { out.Ready = in.Ready return nil } -// Convert_v1alpha1_NnfDataMovementManagerStatus_To_v1alpha3_NnfDataMovementManagerStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfDataMovementManagerStatus_To_v1alpha3_NnfDataMovementManagerStatus(in *NnfDataMovementManagerStatus, out *v1alpha3.NnfDataMovementManagerStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovementManagerStatus_To_v1alpha3_NnfDataMovementManagerStatus(in, out, s) +// Convert_v1alpha1_NnfDataMovementManagerStatus_To_v1alpha4_NnfDataMovementManagerStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfDataMovementManagerStatus_To_v1alpha4_NnfDataMovementManagerStatus(in *NnfDataMovementManagerStatus, out *v1alpha4.NnfDataMovementManagerStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovementManagerStatus_To_v1alpha4_NnfDataMovementManagerStatus(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovementManagerStatus_To_v1alpha1_NnfDataMovementManagerStatus(in *v1alpha3.NnfDataMovementManagerStatus, out *NnfDataMovementManagerStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovementManagerStatus_To_v1alpha1_NnfDataMovementManagerStatus(in *v1alpha4.NnfDataMovementManagerStatus, out *NnfDataMovementManagerStatus, s conversion.Scope) error { out.Ready = in.Ready return nil } -// Convert_v1alpha3_NnfDataMovementManagerStatus_To_v1alpha1_NnfDataMovementManagerStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovementManagerStatus_To_v1alpha1_NnfDataMovementManagerStatus(in *v1alpha3.NnfDataMovementManagerStatus, out *NnfDataMovementManagerStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovementManagerStatus_To_v1alpha1_NnfDataMovementManagerStatus(in, out, s) +// Convert_v1alpha4_NnfDataMovementManagerStatus_To_v1alpha1_NnfDataMovementManagerStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementManagerStatus_To_v1alpha1_NnfDataMovementManagerStatus(in *v1alpha4.NnfDataMovementManagerStatus, out *NnfDataMovementManagerStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementManagerStatus_To_v1alpha1_NnfDataMovementManagerStatus(in, out, s) } -func autoConvert_v1alpha1_NnfDataMovementProfile_To_v1alpha3_NnfDataMovementProfile(in *NnfDataMovementProfile, out *v1alpha3.NnfDataMovementProfile, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfDataMovementProfile_To_v1alpha4_NnfDataMovementProfile(in *NnfDataMovementProfile, out *v1alpha4.NnfDataMovementProfile, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData(&in.Data, &out.Data, s); err != nil { + if err := Convert_v1alpha1_NnfDataMovementProfileData_To_v1alpha4_NnfDataMovementProfileData(&in.Data, &out.Data, s); err != nil { return err } return nil } -// Convert_v1alpha1_NnfDataMovementProfile_To_v1alpha3_NnfDataMovementProfile is an autogenerated conversion function. -func Convert_v1alpha1_NnfDataMovementProfile_To_v1alpha3_NnfDataMovementProfile(in *NnfDataMovementProfile, out *v1alpha3.NnfDataMovementProfile, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovementProfile_To_v1alpha3_NnfDataMovementProfile(in, out, s) +// Convert_v1alpha1_NnfDataMovementProfile_To_v1alpha4_NnfDataMovementProfile is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfDataMovementProfile_To_v1alpha4_NnfDataMovementProfile(in *NnfDataMovementProfile, out *v1alpha4.NnfDataMovementProfile, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovementProfile_To_v1alpha4_NnfDataMovementProfile(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovementProfile_To_v1alpha1_NnfDataMovementProfile(in *v1alpha3.NnfDataMovementProfile, out *NnfDataMovementProfile, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovementProfile_To_v1alpha1_NnfDataMovementProfile(in *v1alpha4.NnfDataMovementProfile, out *NnfDataMovementProfile, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfDataMovementProfileData_To_v1alpha1_NnfDataMovementProfileData(&in.Data, &out.Data, s); err != nil { + if err := Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha1_NnfDataMovementProfileData(&in.Data, &out.Data, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfDataMovementProfile_To_v1alpha1_NnfDataMovementProfile is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovementProfile_To_v1alpha1_NnfDataMovementProfile(in *v1alpha3.NnfDataMovementProfile, out *NnfDataMovementProfile, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovementProfile_To_v1alpha1_NnfDataMovementProfile(in, out, s) +// Convert_v1alpha4_NnfDataMovementProfile_To_v1alpha1_NnfDataMovementProfile is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementProfile_To_v1alpha1_NnfDataMovementProfile(in *v1alpha4.NnfDataMovementProfile, out *NnfDataMovementProfile, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementProfile_To_v1alpha1_NnfDataMovementProfile(in, out, s) } -func autoConvert_v1alpha1_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData(in *NnfDataMovementProfileData, out *v1alpha3.NnfDataMovementProfileData, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfDataMovementProfileData_To_v1alpha4_NnfDataMovementProfileData(in *NnfDataMovementProfileData, out *v1alpha4.NnfDataMovementProfileData, s conversion.Scope) error { out.Default = in.Default out.Pinned = in.Pinned out.Slots = in.Slots @@ -1400,12 +1400,12 @@ func autoConvert_v1alpha1_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovement return nil } -// Convert_v1alpha1_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData is an autogenerated conversion function. -func Convert_v1alpha1_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData(in *NnfDataMovementProfileData, out *v1alpha3.NnfDataMovementProfileData, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData(in, out, s) +// Convert_v1alpha1_NnfDataMovementProfileData_To_v1alpha4_NnfDataMovementProfileData is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfDataMovementProfileData_To_v1alpha4_NnfDataMovementProfileData(in *NnfDataMovementProfileData, out *v1alpha4.NnfDataMovementProfileData, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovementProfileData_To_v1alpha4_NnfDataMovementProfileData(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovementProfileData_To_v1alpha1_NnfDataMovementProfileData(in *v1alpha3.NnfDataMovementProfileData, out *NnfDataMovementProfileData, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovementProfileData_To_v1alpha1_NnfDataMovementProfileData(in *v1alpha4.NnfDataMovementProfileData, out *NnfDataMovementProfileData, s conversion.Scope) error { out.Default = in.Default out.Pinned = in.Pinned out.Slots = in.Slots @@ -1419,50 +1419,50 @@ func autoConvert_v1alpha3_NnfDataMovementProfileData_To_v1alpha1_NnfDataMovement return nil } -// Convert_v1alpha3_NnfDataMovementProfileData_To_v1alpha1_NnfDataMovementProfileData is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovementProfileData_To_v1alpha1_NnfDataMovementProfileData(in *v1alpha3.NnfDataMovementProfileData, out *NnfDataMovementProfileData, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovementProfileData_To_v1alpha1_NnfDataMovementProfileData(in, out, s) +// Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha1_NnfDataMovementProfileData is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha1_NnfDataMovementProfileData(in *v1alpha4.NnfDataMovementProfileData, out *NnfDataMovementProfileData, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementProfileData_To_v1alpha1_NnfDataMovementProfileData(in, out, s) } -func autoConvert_v1alpha1_NnfDataMovementProfileList_To_v1alpha3_NnfDataMovementProfileList(in *NnfDataMovementProfileList, out *v1alpha3.NnfDataMovementProfileList, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfDataMovementProfileList_To_v1alpha4_NnfDataMovementProfileList(in *NnfDataMovementProfileList, out *v1alpha4.NnfDataMovementProfileList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha3.NnfDataMovementProfile)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]v1alpha4.NnfDataMovementProfile)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha1_NnfDataMovementProfileList_To_v1alpha3_NnfDataMovementProfileList is an autogenerated conversion function. -func Convert_v1alpha1_NnfDataMovementProfileList_To_v1alpha3_NnfDataMovementProfileList(in *NnfDataMovementProfileList, out *v1alpha3.NnfDataMovementProfileList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovementProfileList_To_v1alpha3_NnfDataMovementProfileList(in, out, s) +// Convert_v1alpha1_NnfDataMovementProfileList_To_v1alpha4_NnfDataMovementProfileList is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfDataMovementProfileList_To_v1alpha4_NnfDataMovementProfileList(in *NnfDataMovementProfileList, out *v1alpha4.NnfDataMovementProfileList, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovementProfileList_To_v1alpha4_NnfDataMovementProfileList(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovementProfileList_To_v1alpha1_NnfDataMovementProfileList(in *v1alpha3.NnfDataMovementProfileList, out *NnfDataMovementProfileList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovementProfileList_To_v1alpha1_NnfDataMovementProfileList(in *v1alpha4.NnfDataMovementProfileList, out *NnfDataMovementProfileList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]NnfDataMovementProfile)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha3_NnfDataMovementProfileList_To_v1alpha1_NnfDataMovementProfileList is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovementProfileList_To_v1alpha1_NnfDataMovementProfileList(in *v1alpha3.NnfDataMovementProfileList, out *NnfDataMovementProfileList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovementProfileList_To_v1alpha1_NnfDataMovementProfileList(in, out, s) +// Convert_v1alpha4_NnfDataMovementProfileList_To_v1alpha1_NnfDataMovementProfileList is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementProfileList_To_v1alpha1_NnfDataMovementProfileList(in *v1alpha4.NnfDataMovementProfileList, out *NnfDataMovementProfileList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementProfileList_To_v1alpha1_NnfDataMovementProfileList(in, out, s) } -func autoConvert_v1alpha1_NnfDataMovementSpec_To_v1alpha3_NnfDataMovementSpec(in *NnfDataMovementSpec, out *v1alpha3.NnfDataMovementSpec, s conversion.Scope) error { - out.Source = (*v1alpha3.NnfDataMovementSpecSourceDestination)(unsafe.Pointer(in.Source)) - out.Destination = (*v1alpha3.NnfDataMovementSpecSourceDestination)(unsafe.Pointer(in.Destination)) +func autoConvert_v1alpha1_NnfDataMovementSpec_To_v1alpha4_NnfDataMovementSpec(in *NnfDataMovementSpec, out *v1alpha4.NnfDataMovementSpec, s conversion.Scope) error { + out.Source = (*v1alpha4.NnfDataMovementSpecSourceDestination)(unsafe.Pointer(in.Source)) + out.Destination = (*v1alpha4.NnfDataMovementSpecSourceDestination)(unsafe.Pointer(in.Destination)) out.UserId = in.UserId out.GroupId = in.GroupId out.Cancel = in.Cancel out.ProfileReference = in.ProfileReference - out.UserConfig = (*v1alpha3.NnfDataMovementConfig)(unsafe.Pointer(in.UserConfig)) + out.UserConfig = (*v1alpha4.NnfDataMovementConfig)(unsafe.Pointer(in.UserConfig)) return nil } -// Convert_v1alpha1_NnfDataMovementSpec_To_v1alpha3_NnfDataMovementSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfDataMovementSpec_To_v1alpha3_NnfDataMovementSpec(in *NnfDataMovementSpec, out *v1alpha3.NnfDataMovementSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovementSpec_To_v1alpha3_NnfDataMovementSpec(in, out, s) +// Convert_v1alpha1_NnfDataMovementSpec_To_v1alpha4_NnfDataMovementSpec is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfDataMovementSpec_To_v1alpha4_NnfDataMovementSpec(in *NnfDataMovementSpec, out *v1alpha4.NnfDataMovementSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovementSpec_To_v1alpha4_NnfDataMovementSpec(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovementSpec_To_v1alpha1_NnfDataMovementSpec(in *v1alpha3.NnfDataMovementSpec, out *NnfDataMovementSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovementSpec_To_v1alpha1_NnfDataMovementSpec(in *v1alpha4.NnfDataMovementSpec, out *NnfDataMovementSpec, s conversion.Scope) error { out.Source = (*NnfDataMovementSpecSourceDestination)(unsafe.Pointer(in.Source)) out.Destination = (*NnfDataMovementSpecSourceDestination)(unsafe.Pointer(in.Destination)) out.UserId = in.UserId @@ -1473,51 +1473,51 @@ func autoConvert_v1alpha3_NnfDataMovementSpec_To_v1alpha1_NnfDataMovementSpec(in return nil } -// Convert_v1alpha3_NnfDataMovementSpec_To_v1alpha1_NnfDataMovementSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovementSpec_To_v1alpha1_NnfDataMovementSpec(in *v1alpha3.NnfDataMovementSpec, out *NnfDataMovementSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovementSpec_To_v1alpha1_NnfDataMovementSpec(in, out, s) +// Convert_v1alpha4_NnfDataMovementSpec_To_v1alpha1_NnfDataMovementSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementSpec_To_v1alpha1_NnfDataMovementSpec(in *v1alpha4.NnfDataMovementSpec, out *NnfDataMovementSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementSpec_To_v1alpha1_NnfDataMovementSpec(in, out, s) } -func autoConvert_v1alpha1_NnfDataMovementSpecSourceDestination_To_v1alpha3_NnfDataMovementSpecSourceDestination(in *NnfDataMovementSpecSourceDestination, out *v1alpha3.NnfDataMovementSpecSourceDestination, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfDataMovementSpecSourceDestination_To_v1alpha4_NnfDataMovementSpecSourceDestination(in *NnfDataMovementSpecSourceDestination, out *v1alpha4.NnfDataMovementSpecSourceDestination, s conversion.Scope) error { out.Path = in.Path out.StorageReference = in.StorageReference return nil } -// Convert_v1alpha1_NnfDataMovementSpecSourceDestination_To_v1alpha3_NnfDataMovementSpecSourceDestination is an autogenerated conversion function. -func Convert_v1alpha1_NnfDataMovementSpecSourceDestination_To_v1alpha3_NnfDataMovementSpecSourceDestination(in *NnfDataMovementSpecSourceDestination, out *v1alpha3.NnfDataMovementSpecSourceDestination, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovementSpecSourceDestination_To_v1alpha3_NnfDataMovementSpecSourceDestination(in, out, s) +// Convert_v1alpha1_NnfDataMovementSpecSourceDestination_To_v1alpha4_NnfDataMovementSpecSourceDestination is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfDataMovementSpecSourceDestination_To_v1alpha4_NnfDataMovementSpecSourceDestination(in *NnfDataMovementSpecSourceDestination, out *v1alpha4.NnfDataMovementSpecSourceDestination, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovementSpecSourceDestination_To_v1alpha4_NnfDataMovementSpecSourceDestination(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovementSpecSourceDestination_To_v1alpha1_NnfDataMovementSpecSourceDestination(in *v1alpha3.NnfDataMovementSpecSourceDestination, out *NnfDataMovementSpecSourceDestination, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovementSpecSourceDestination_To_v1alpha1_NnfDataMovementSpecSourceDestination(in *v1alpha4.NnfDataMovementSpecSourceDestination, out *NnfDataMovementSpecSourceDestination, s conversion.Scope) error { out.Path = in.Path out.StorageReference = in.StorageReference return nil } -// Convert_v1alpha3_NnfDataMovementSpecSourceDestination_To_v1alpha1_NnfDataMovementSpecSourceDestination is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovementSpecSourceDestination_To_v1alpha1_NnfDataMovementSpecSourceDestination(in *v1alpha3.NnfDataMovementSpecSourceDestination, out *NnfDataMovementSpecSourceDestination, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovementSpecSourceDestination_To_v1alpha1_NnfDataMovementSpecSourceDestination(in, out, s) +// Convert_v1alpha4_NnfDataMovementSpecSourceDestination_To_v1alpha1_NnfDataMovementSpecSourceDestination is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementSpecSourceDestination_To_v1alpha1_NnfDataMovementSpecSourceDestination(in *v1alpha4.NnfDataMovementSpecSourceDestination, out *NnfDataMovementSpecSourceDestination, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementSpecSourceDestination_To_v1alpha1_NnfDataMovementSpecSourceDestination(in, out, s) } -func autoConvert_v1alpha1_NnfDataMovementStatus_To_v1alpha3_NnfDataMovementStatus(in *NnfDataMovementStatus, out *v1alpha3.NnfDataMovementStatus, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfDataMovementStatus_To_v1alpha4_NnfDataMovementStatus(in *NnfDataMovementStatus, out *v1alpha4.NnfDataMovementStatus, s conversion.Scope) error { out.State = in.State out.Status = in.Status out.Message = in.Message out.StartTime = (*metav1.MicroTime)(unsafe.Pointer(in.StartTime)) out.EndTime = (*metav1.MicroTime)(unsafe.Pointer(in.EndTime)) out.Restarts = in.Restarts - out.CommandStatus = (*v1alpha3.NnfDataMovementCommandStatus)(unsafe.Pointer(in.CommandStatus)) + out.CommandStatus = (*v1alpha4.NnfDataMovementCommandStatus)(unsafe.Pointer(in.CommandStatus)) out.ResourceError = in.ResourceError return nil } -// Convert_v1alpha1_NnfDataMovementStatus_To_v1alpha3_NnfDataMovementStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfDataMovementStatus_To_v1alpha3_NnfDataMovementStatus(in *NnfDataMovementStatus, out *v1alpha3.NnfDataMovementStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovementStatus_To_v1alpha3_NnfDataMovementStatus(in, out, s) +// Convert_v1alpha1_NnfDataMovementStatus_To_v1alpha4_NnfDataMovementStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfDataMovementStatus_To_v1alpha4_NnfDataMovementStatus(in *NnfDataMovementStatus, out *v1alpha4.NnfDataMovementStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDataMovementStatus_To_v1alpha4_NnfDataMovementStatus(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovementStatus_To_v1alpha1_NnfDataMovementStatus(in *v1alpha3.NnfDataMovementStatus, out *NnfDataMovementStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovementStatus_To_v1alpha1_NnfDataMovementStatus(in *v1alpha4.NnfDataMovementStatus, out *NnfDataMovementStatus, s conversion.Scope) error { out.State = in.State out.Status = in.Status out.Message = in.Message @@ -1529,102 +1529,102 @@ func autoConvert_v1alpha3_NnfDataMovementStatus_To_v1alpha1_NnfDataMovementStatu return nil } -// Convert_v1alpha3_NnfDataMovementStatus_To_v1alpha1_NnfDataMovementStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovementStatus_To_v1alpha1_NnfDataMovementStatus(in *v1alpha3.NnfDataMovementStatus, out *NnfDataMovementStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovementStatus_To_v1alpha1_NnfDataMovementStatus(in, out, s) +// Convert_v1alpha4_NnfDataMovementStatus_To_v1alpha1_NnfDataMovementStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementStatus_To_v1alpha1_NnfDataMovementStatus(in *v1alpha4.NnfDataMovementStatus, out *NnfDataMovementStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementStatus_To_v1alpha1_NnfDataMovementStatus(in, out, s) } -func autoConvert_v1alpha1_NnfDriveStatus_To_v1alpha3_NnfDriveStatus(in *NnfDriveStatus, out *v1alpha3.NnfDriveStatus, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfDriveStatus_To_v1alpha4_NnfDriveStatus(in *NnfDriveStatus, out *v1alpha4.NnfDriveStatus, s conversion.Scope) error { out.Model = in.Model out.SerialNumber = in.SerialNumber out.FirmwareVersion = in.FirmwareVersion out.Slot = in.Slot out.Capacity = in.Capacity out.WearLevel = in.WearLevel - if err := Convert_v1alpha1_NnfResourceStatus_To_v1alpha3_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { + if err := Convert_v1alpha1_NnfResourceStatus_To_v1alpha4_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { return err } return nil } -// Convert_v1alpha1_NnfDriveStatus_To_v1alpha3_NnfDriveStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfDriveStatus_To_v1alpha3_NnfDriveStatus(in *NnfDriveStatus, out *v1alpha3.NnfDriveStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDriveStatus_To_v1alpha3_NnfDriveStatus(in, out, s) +// Convert_v1alpha1_NnfDriveStatus_To_v1alpha4_NnfDriveStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfDriveStatus_To_v1alpha4_NnfDriveStatus(in *NnfDriveStatus, out *v1alpha4.NnfDriveStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfDriveStatus_To_v1alpha4_NnfDriveStatus(in, out, s) } -func autoConvert_v1alpha3_NnfDriveStatus_To_v1alpha1_NnfDriveStatus(in *v1alpha3.NnfDriveStatus, out *NnfDriveStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDriveStatus_To_v1alpha1_NnfDriveStatus(in *v1alpha4.NnfDriveStatus, out *NnfDriveStatus, s conversion.Scope) error { out.Model = in.Model out.SerialNumber = in.SerialNumber out.FirmwareVersion = in.FirmwareVersion out.Slot = in.Slot out.Capacity = in.Capacity out.WearLevel = in.WearLevel - if err := Convert_v1alpha3_NnfResourceStatus_To_v1alpha1_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { + if err := Convert_v1alpha4_NnfResourceStatus_To_v1alpha1_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfDriveStatus_To_v1alpha1_NnfDriveStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfDriveStatus_To_v1alpha1_NnfDriveStatus(in *v1alpha3.NnfDriveStatus, out *NnfDriveStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDriveStatus_To_v1alpha1_NnfDriveStatus(in, out, s) +// Convert_v1alpha4_NnfDriveStatus_To_v1alpha1_NnfDriveStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDriveStatus_To_v1alpha1_NnfDriveStatus(in *v1alpha4.NnfDriveStatus, out *NnfDriveStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDriveStatus_To_v1alpha1_NnfDriveStatus(in, out, s) } -func autoConvert_v1alpha1_NnfLustreMGT_To_v1alpha3_NnfLustreMGT(in *NnfLustreMGT, out *v1alpha3.NnfLustreMGT, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfLustreMGT_To_v1alpha4_NnfLustreMGT(in *NnfLustreMGT, out *v1alpha4.NnfLustreMGT, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfLustreMGTSpec_To_v1alpha3_NnfLustreMGTSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha1_NnfLustreMGTSpec_To_v1alpha4_NnfLustreMGTSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha1_NnfLustreMGTStatus_To_v1alpha3_NnfLustreMGTStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha1_NnfLustreMGTStatus_To_v1alpha4_NnfLustreMGTStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha1_NnfLustreMGT_To_v1alpha3_NnfLustreMGT is an autogenerated conversion function. -func Convert_v1alpha1_NnfLustreMGT_To_v1alpha3_NnfLustreMGT(in *NnfLustreMGT, out *v1alpha3.NnfLustreMGT, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfLustreMGT_To_v1alpha3_NnfLustreMGT(in, out, s) +// Convert_v1alpha1_NnfLustreMGT_To_v1alpha4_NnfLustreMGT is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfLustreMGT_To_v1alpha4_NnfLustreMGT(in *NnfLustreMGT, out *v1alpha4.NnfLustreMGT, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfLustreMGT_To_v1alpha4_NnfLustreMGT(in, out, s) } -func autoConvert_v1alpha3_NnfLustreMGT_To_v1alpha1_NnfLustreMGT(in *v1alpha3.NnfLustreMGT, out *NnfLustreMGT, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfLustreMGT_To_v1alpha1_NnfLustreMGT(in *v1alpha4.NnfLustreMGT, out *NnfLustreMGT, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfLustreMGTSpec_To_v1alpha1_NnfLustreMGTSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha4_NnfLustreMGTSpec_To_v1alpha1_NnfLustreMGTSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha3_NnfLustreMGTStatus_To_v1alpha1_NnfLustreMGTStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha4_NnfLustreMGTStatus_To_v1alpha1_NnfLustreMGTStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfLustreMGT_To_v1alpha1_NnfLustreMGT is an autogenerated conversion function. -func Convert_v1alpha3_NnfLustreMGT_To_v1alpha1_NnfLustreMGT(in *v1alpha3.NnfLustreMGT, out *NnfLustreMGT, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfLustreMGT_To_v1alpha1_NnfLustreMGT(in, out, s) +// Convert_v1alpha4_NnfLustreMGT_To_v1alpha1_NnfLustreMGT is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfLustreMGT_To_v1alpha1_NnfLustreMGT(in *v1alpha4.NnfLustreMGT, out *NnfLustreMGT, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfLustreMGT_To_v1alpha1_NnfLustreMGT(in, out, s) } -func autoConvert_v1alpha1_NnfLustreMGTList_To_v1alpha3_NnfLustreMGTList(in *NnfLustreMGTList, out *v1alpha3.NnfLustreMGTList, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfLustreMGTList_To_v1alpha4_NnfLustreMGTList(in *NnfLustreMGTList, out *v1alpha4.NnfLustreMGTList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha3.NnfLustreMGT)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]v1alpha4.NnfLustreMGT)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha1_NnfLustreMGTList_To_v1alpha3_NnfLustreMGTList is an autogenerated conversion function. -func Convert_v1alpha1_NnfLustreMGTList_To_v1alpha3_NnfLustreMGTList(in *NnfLustreMGTList, out *v1alpha3.NnfLustreMGTList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfLustreMGTList_To_v1alpha3_NnfLustreMGTList(in, out, s) +// Convert_v1alpha1_NnfLustreMGTList_To_v1alpha4_NnfLustreMGTList is an autogenerated conversion function. +func Convert_v1alpha1_NnfLustreMGTList_To_v1alpha4_NnfLustreMGTList(in *NnfLustreMGTList, out *v1alpha4.NnfLustreMGTList, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfLustreMGTList_To_v1alpha4_NnfLustreMGTList(in, out, s) } -func autoConvert_v1alpha3_NnfLustreMGTList_To_v1alpha1_NnfLustreMGTList(in *v1alpha3.NnfLustreMGTList, out *NnfLustreMGTList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfLustreMGTList_To_v1alpha1_NnfLustreMGTList(in *v1alpha4.NnfLustreMGTList, out *NnfLustreMGTList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]NnfLustreMGT)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha3_NnfLustreMGTList_To_v1alpha1_NnfLustreMGTList is an autogenerated conversion function. 
-func Convert_v1alpha3_NnfLustreMGTList_To_v1alpha1_NnfLustreMGTList(in *v1alpha3.NnfLustreMGTList, out *NnfLustreMGTList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfLustreMGTList_To_v1alpha1_NnfLustreMGTList(in, out, s) +// Convert_v1alpha4_NnfLustreMGTList_To_v1alpha1_NnfLustreMGTList is an autogenerated conversion function. +func Convert_v1alpha4_NnfLustreMGTList_To_v1alpha1_NnfLustreMGTList(in *v1alpha4.NnfLustreMGTList, out *NnfLustreMGTList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfLustreMGTList_To_v1alpha1_NnfLustreMGTList(in, out, s) } -func autoConvert_v1alpha1_NnfLustreMGTSpec_To_v1alpha3_NnfLustreMGTSpec(in *NnfLustreMGTSpec, out *v1alpha3.NnfLustreMGTSpec, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfLustreMGTSpec_To_v1alpha4_NnfLustreMGTSpec(in *NnfLustreMGTSpec, out *v1alpha4.NnfLustreMGTSpec, s conversion.Scope) error { out.Addresses = *(*[]string)(unsafe.Pointer(&in.Addresses)) out.FsNameBlackList = *(*[]string)(unsafe.Pointer(&in.FsNameBlackList)) out.FsNameStart = in.FsNameStart @@ -1633,12 +1633,12 @@ func autoConvert_v1alpha1_NnfLustreMGTSpec_To_v1alpha3_NnfLustreMGTSpec(in *NnfL return nil } -// Convert_v1alpha1_NnfLustreMGTSpec_To_v1alpha3_NnfLustreMGTSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfLustreMGTSpec_To_v1alpha3_NnfLustreMGTSpec(in *NnfLustreMGTSpec, out *v1alpha3.NnfLustreMGTSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfLustreMGTSpec_To_v1alpha3_NnfLustreMGTSpec(in, out, s) +// Convert_v1alpha1_NnfLustreMGTSpec_To_v1alpha4_NnfLustreMGTSpec is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfLustreMGTSpec_To_v1alpha4_NnfLustreMGTSpec(in *NnfLustreMGTSpec, out *v1alpha4.NnfLustreMGTSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfLustreMGTSpec_To_v1alpha4_NnfLustreMGTSpec(in, out, s) } -func autoConvert_v1alpha3_NnfLustreMGTSpec_To_v1alpha1_NnfLustreMGTSpec(in *v1alpha3.NnfLustreMGTSpec, out *NnfLustreMGTSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfLustreMGTSpec_To_v1alpha1_NnfLustreMGTSpec(in *v1alpha4.NnfLustreMGTSpec, out *NnfLustreMGTSpec, s conversion.Scope) error { out.Addresses = *(*[]string)(unsafe.Pointer(&in.Addresses)) out.FsNameBlackList = *(*[]string)(unsafe.Pointer(&in.FsNameBlackList)) out.FsNameStart = in.FsNameStart @@ -1647,179 +1647,179 @@ func autoConvert_v1alpha3_NnfLustreMGTSpec_To_v1alpha1_NnfLustreMGTSpec(in *v1al return nil } -// Convert_v1alpha3_NnfLustreMGTSpec_To_v1alpha1_NnfLustreMGTSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfLustreMGTSpec_To_v1alpha1_NnfLustreMGTSpec(in *v1alpha3.NnfLustreMGTSpec, out *NnfLustreMGTSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfLustreMGTSpec_To_v1alpha1_NnfLustreMGTSpec(in, out, s) +// Convert_v1alpha4_NnfLustreMGTSpec_To_v1alpha1_NnfLustreMGTSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfLustreMGTSpec_To_v1alpha1_NnfLustreMGTSpec(in *v1alpha4.NnfLustreMGTSpec, out *NnfLustreMGTSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfLustreMGTSpec_To_v1alpha1_NnfLustreMGTSpec(in, out, s) } -func autoConvert_v1alpha1_NnfLustreMGTStatus_To_v1alpha3_NnfLustreMGTStatus(in *NnfLustreMGTStatus, out *v1alpha3.NnfLustreMGTStatus, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfLustreMGTStatus_To_v1alpha4_NnfLustreMGTStatus(in *NnfLustreMGTStatus, out *v1alpha4.NnfLustreMGTStatus, s conversion.Scope) error { out.FsNameNext = in.FsNameNext - out.ClaimList = *(*[]v1alpha3.NnfLustreMGTStatusClaim)(unsafe.Pointer(&in.ClaimList)) + out.ClaimList = *(*[]v1alpha4.NnfLustreMGTStatusClaim)(unsafe.Pointer(&in.ClaimList)) out.ResourceError = in.ResourceError return nil } -// Convert_v1alpha1_NnfLustreMGTStatus_To_v1alpha3_NnfLustreMGTStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfLustreMGTStatus_To_v1alpha3_NnfLustreMGTStatus(in *NnfLustreMGTStatus, out *v1alpha3.NnfLustreMGTStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfLustreMGTStatus_To_v1alpha3_NnfLustreMGTStatus(in, out, s) +// Convert_v1alpha1_NnfLustreMGTStatus_To_v1alpha4_NnfLustreMGTStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfLustreMGTStatus_To_v1alpha4_NnfLustreMGTStatus(in *NnfLustreMGTStatus, out *v1alpha4.NnfLustreMGTStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfLustreMGTStatus_To_v1alpha4_NnfLustreMGTStatus(in, out, s) } -func autoConvert_v1alpha3_NnfLustreMGTStatus_To_v1alpha1_NnfLustreMGTStatus(in *v1alpha3.NnfLustreMGTStatus, out *NnfLustreMGTStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfLustreMGTStatus_To_v1alpha1_NnfLustreMGTStatus(in *v1alpha4.NnfLustreMGTStatus, out *NnfLustreMGTStatus, s conversion.Scope) error { out.FsNameNext = in.FsNameNext out.ClaimList = *(*[]NnfLustreMGTStatusClaim)(unsafe.Pointer(&in.ClaimList)) out.ResourceError = in.ResourceError return nil } -// Convert_v1alpha3_NnfLustreMGTStatus_To_v1alpha1_NnfLustreMGTStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfLustreMGTStatus_To_v1alpha1_NnfLustreMGTStatus(in *v1alpha3.NnfLustreMGTStatus, out *NnfLustreMGTStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfLustreMGTStatus_To_v1alpha1_NnfLustreMGTStatus(in, out, s) +// Convert_v1alpha4_NnfLustreMGTStatus_To_v1alpha1_NnfLustreMGTStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfLustreMGTStatus_To_v1alpha1_NnfLustreMGTStatus(in *v1alpha4.NnfLustreMGTStatus, out *NnfLustreMGTStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfLustreMGTStatus_To_v1alpha1_NnfLustreMGTStatus(in, out, s) } -func autoConvert_v1alpha1_NnfLustreMGTStatusClaim_To_v1alpha3_NnfLustreMGTStatusClaim(in *NnfLustreMGTStatusClaim, out *v1alpha3.NnfLustreMGTStatusClaim, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfLustreMGTStatusClaim_To_v1alpha4_NnfLustreMGTStatusClaim(in *NnfLustreMGTStatusClaim, out *v1alpha4.NnfLustreMGTStatusClaim, s conversion.Scope) error { out.Reference = in.Reference out.FsName = in.FsName return nil } -// Convert_v1alpha1_NnfLustreMGTStatusClaim_To_v1alpha3_NnfLustreMGTStatusClaim is an autogenerated conversion function. -func Convert_v1alpha1_NnfLustreMGTStatusClaim_To_v1alpha3_NnfLustreMGTStatusClaim(in *NnfLustreMGTStatusClaim, out *v1alpha3.NnfLustreMGTStatusClaim, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfLustreMGTStatusClaim_To_v1alpha3_NnfLustreMGTStatusClaim(in, out, s) +// Convert_v1alpha1_NnfLustreMGTStatusClaim_To_v1alpha4_NnfLustreMGTStatusClaim is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfLustreMGTStatusClaim_To_v1alpha4_NnfLustreMGTStatusClaim(in *NnfLustreMGTStatusClaim, out *v1alpha4.NnfLustreMGTStatusClaim, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfLustreMGTStatusClaim_To_v1alpha4_NnfLustreMGTStatusClaim(in, out, s) } -func autoConvert_v1alpha3_NnfLustreMGTStatusClaim_To_v1alpha1_NnfLustreMGTStatusClaim(in *v1alpha3.NnfLustreMGTStatusClaim, out *NnfLustreMGTStatusClaim, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfLustreMGTStatusClaim_To_v1alpha1_NnfLustreMGTStatusClaim(in *v1alpha4.NnfLustreMGTStatusClaim, out *NnfLustreMGTStatusClaim, s conversion.Scope) error { out.Reference = in.Reference out.FsName = in.FsName return nil } -// Convert_v1alpha3_NnfLustreMGTStatusClaim_To_v1alpha1_NnfLustreMGTStatusClaim is an autogenerated conversion function. -func Convert_v1alpha3_NnfLustreMGTStatusClaim_To_v1alpha1_NnfLustreMGTStatusClaim(in *v1alpha3.NnfLustreMGTStatusClaim, out *NnfLustreMGTStatusClaim, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfLustreMGTStatusClaim_To_v1alpha1_NnfLustreMGTStatusClaim(in, out, s) +// Convert_v1alpha4_NnfLustreMGTStatusClaim_To_v1alpha1_NnfLustreMGTStatusClaim is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfLustreMGTStatusClaim_To_v1alpha1_NnfLustreMGTStatusClaim(in *v1alpha4.NnfLustreMGTStatusClaim, out *NnfLustreMGTStatusClaim, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfLustreMGTStatusClaim_To_v1alpha1_NnfLustreMGTStatusClaim(in, out, s) } -func autoConvert_v1alpha1_NnfNode_To_v1alpha3_NnfNode(in *NnfNode, out *v1alpha3.NnfNode, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfNode_To_v1alpha4_NnfNode(in *NnfNode, out *v1alpha4.NnfNode, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfNodeSpec_To_v1alpha3_NnfNodeSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha1_NnfNodeSpec_To_v1alpha4_NnfNodeSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha1_NnfNodeStatus_To_v1alpha3_NnfNodeStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha1_NnfNodeStatus_To_v1alpha4_NnfNodeStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha1_NnfNode_To_v1alpha3_NnfNode is an autogenerated conversion function. -func Convert_v1alpha1_NnfNode_To_v1alpha3_NnfNode(in *NnfNode, out *v1alpha3.NnfNode, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNode_To_v1alpha3_NnfNode(in, out, s) +// Convert_v1alpha1_NnfNode_To_v1alpha4_NnfNode is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfNode_To_v1alpha4_NnfNode(in *NnfNode, out *v1alpha4.NnfNode, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNode_To_v1alpha4_NnfNode(in, out, s) } -func autoConvert_v1alpha3_NnfNode_To_v1alpha1_NnfNode(in *v1alpha3.NnfNode, out *NnfNode, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNode_To_v1alpha1_NnfNode(in *v1alpha4.NnfNode, out *NnfNode, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfNodeSpec_To_v1alpha1_NnfNodeSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha4_NnfNodeSpec_To_v1alpha1_NnfNodeSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha3_NnfNodeStatus_To_v1alpha1_NnfNodeStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha4_NnfNodeStatus_To_v1alpha1_NnfNodeStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfNode_To_v1alpha1_NnfNode is an autogenerated conversion function. -func Convert_v1alpha3_NnfNode_To_v1alpha1_NnfNode(in *v1alpha3.NnfNode, out *NnfNode, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNode_To_v1alpha1_NnfNode(in, out, s) +// Convert_v1alpha4_NnfNode_To_v1alpha1_NnfNode is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNode_To_v1alpha1_NnfNode(in *v1alpha4.NnfNode, out *NnfNode, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNode_To_v1alpha1_NnfNode(in, out, s) } -func autoConvert_v1alpha1_NnfNodeBlockStorage_To_v1alpha3_NnfNodeBlockStorage(in *NnfNodeBlockStorage, out *v1alpha3.NnfNodeBlockStorage, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfNodeBlockStorage_To_v1alpha4_NnfNodeBlockStorage(in *NnfNodeBlockStorage, out *v1alpha4.NnfNodeBlockStorage, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfNodeBlockStorageSpec_To_v1alpha3_NnfNodeBlockStorageSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha1_NnfNodeBlockStorageSpec_To_v1alpha4_NnfNodeBlockStorageSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha1_NnfNodeBlockStorageStatus_To_v1alpha3_NnfNodeBlockStorageStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha1_NnfNodeBlockStorageStatus_To_v1alpha4_NnfNodeBlockStorageStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha1_NnfNodeBlockStorage_To_v1alpha3_NnfNodeBlockStorage is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeBlockStorage_To_v1alpha3_NnfNodeBlockStorage(in *NnfNodeBlockStorage, out *v1alpha3.NnfNodeBlockStorage, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeBlockStorage_To_v1alpha3_NnfNodeBlockStorage(in, out, s) +// Convert_v1alpha1_NnfNodeBlockStorage_To_v1alpha4_NnfNodeBlockStorage is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfNodeBlockStorage_To_v1alpha4_NnfNodeBlockStorage(in *NnfNodeBlockStorage, out *v1alpha4.NnfNodeBlockStorage, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeBlockStorage_To_v1alpha4_NnfNodeBlockStorage(in, out, s) } -func autoConvert_v1alpha3_NnfNodeBlockStorage_To_v1alpha1_NnfNodeBlockStorage(in *v1alpha3.NnfNodeBlockStorage, out *NnfNodeBlockStorage, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeBlockStorage_To_v1alpha1_NnfNodeBlockStorage(in *v1alpha4.NnfNodeBlockStorage, out *NnfNodeBlockStorage, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfNodeBlockStorageSpec_To_v1alpha1_NnfNodeBlockStorageSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha4_NnfNodeBlockStorageSpec_To_v1alpha1_NnfNodeBlockStorageSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha3_NnfNodeBlockStorageStatus_To_v1alpha1_NnfNodeBlockStorageStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha4_NnfNodeBlockStorageStatus_To_v1alpha1_NnfNodeBlockStorageStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfNodeBlockStorage_To_v1alpha1_NnfNodeBlockStorage is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeBlockStorage_To_v1alpha1_NnfNodeBlockStorage(in *v1alpha3.NnfNodeBlockStorage, out *NnfNodeBlockStorage, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeBlockStorage_To_v1alpha1_NnfNodeBlockStorage(in, out, s) +// Convert_v1alpha4_NnfNodeBlockStorage_To_v1alpha1_NnfNodeBlockStorage is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeBlockStorage_To_v1alpha1_NnfNodeBlockStorage(in *v1alpha4.NnfNodeBlockStorage, out *NnfNodeBlockStorage, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeBlockStorage_To_v1alpha1_NnfNodeBlockStorage(in, out, s) } -func autoConvert_v1alpha1_NnfNodeBlockStorageAccessStatus_To_v1alpha3_NnfNodeBlockStorageAccessStatus(in *NnfNodeBlockStorageAccessStatus, out *v1alpha3.NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfNodeBlockStorageAccessStatus_To_v1alpha4_NnfNodeBlockStorageAccessStatus(in *NnfNodeBlockStorageAccessStatus, out *v1alpha4.NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { out.DevicePaths = *(*[]string)(unsafe.Pointer(&in.DevicePaths)) out.StorageGroupId = in.StorageGroupId return nil } -// Convert_v1alpha1_NnfNodeBlockStorageAccessStatus_To_v1alpha3_NnfNodeBlockStorageAccessStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeBlockStorageAccessStatus_To_v1alpha3_NnfNodeBlockStorageAccessStatus(in *NnfNodeBlockStorageAccessStatus, out *v1alpha3.NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeBlockStorageAccessStatus_To_v1alpha3_NnfNodeBlockStorageAccessStatus(in, out, s) +// Convert_v1alpha1_NnfNodeBlockStorageAccessStatus_To_v1alpha4_NnfNodeBlockStorageAccessStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfNodeBlockStorageAccessStatus_To_v1alpha4_NnfNodeBlockStorageAccessStatus(in *NnfNodeBlockStorageAccessStatus, out *v1alpha4.NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeBlockStorageAccessStatus_To_v1alpha4_NnfNodeBlockStorageAccessStatus(in, out, s) } -func autoConvert_v1alpha3_NnfNodeBlockStorageAccessStatus_To_v1alpha1_NnfNodeBlockStorageAccessStatus(in *v1alpha3.NnfNodeBlockStorageAccessStatus, out *NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeBlockStorageAccessStatus_To_v1alpha1_NnfNodeBlockStorageAccessStatus(in *v1alpha4.NnfNodeBlockStorageAccessStatus, out *NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { out.DevicePaths = *(*[]string)(unsafe.Pointer(&in.DevicePaths)) out.StorageGroupId = in.StorageGroupId return nil } -// Convert_v1alpha3_NnfNodeBlockStorageAccessStatus_To_v1alpha1_NnfNodeBlockStorageAccessStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeBlockStorageAccessStatus_To_v1alpha1_NnfNodeBlockStorageAccessStatus(in *v1alpha3.NnfNodeBlockStorageAccessStatus, out *NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeBlockStorageAccessStatus_To_v1alpha1_NnfNodeBlockStorageAccessStatus(in, out, s) +// Convert_v1alpha4_NnfNodeBlockStorageAccessStatus_To_v1alpha1_NnfNodeBlockStorageAccessStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeBlockStorageAccessStatus_To_v1alpha1_NnfNodeBlockStorageAccessStatus(in *v1alpha4.NnfNodeBlockStorageAccessStatus, out *NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeBlockStorageAccessStatus_To_v1alpha1_NnfNodeBlockStorageAccessStatus(in, out, s) } -func autoConvert_v1alpha1_NnfNodeBlockStorageAllocationSpec_To_v1alpha3_NnfNodeBlockStorageAllocationSpec(in *NnfNodeBlockStorageAllocationSpec, out *v1alpha3.NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfNodeBlockStorageAllocationSpec_To_v1alpha4_NnfNodeBlockStorageAllocationSpec(in *NnfNodeBlockStorageAllocationSpec, out *v1alpha4.NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { out.Capacity = in.Capacity out.Access = *(*[]string)(unsafe.Pointer(&in.Access)) return nil } -// Convert_v1alpha1_NnfNodeBlockStorageAllocationSpec_To_v1alpha3_NnfNodeBlockStorageAllocationSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeBlockStorageAllocationSpec_To_v1alpha3_NnfNodeBlockStorageAllocationSpec(in *NnfNodeBlockStorageAllocationSpec, out *v1alpha3.NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeBlockStorageAllocationSpec_To_v1alpha3_NnfNodeBlockStorageAllocationSpec(in, out, s) +// Convert_v1alpha1_NnfNodeBlockStorageAllocationSpec_To_v1alpha4_NnfNodeBlockStorageAllocationSpec is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfNodeBlockStorageAllocationSpec_To_v1alpha4_NnfNodeBlockStorageAllocationSpec(in *NnfNodeBlockStorageAllocationSpec, out *v1alpha4.NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeBlockStorageAllocationSpec_To_v1alpha4_NnfNodeBlockStorageAllocationSpec(in, out, s) } -func autoConvert_v1alpha3_NnfNodeBlockStorageAllocationSpec_To_v1alpha1_NnfNodeBlockStorageAllocationSpec(in *v1alpha3.NnfNodeBlockStorageAllocationSpec, out *NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeBlockStorageAllocationSpec_To_v1alpha1_NnfNodeBlockStorageAllocationSpec(in *v1alpha4.NnfNodeBlockStorageAllocationSpec, out *NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { out.Capacity = in.Capacity out.Access = *(*[]string)(unsafe.Pointer(&in.Access)) return nil } -// Convert_v1alpha3_NnfNodeBlockStorageAllocationSpec_To_v1alpha1_NnfNodeBlockStorageAllocationSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeBlockStorageAllocationSpec_To_v1alpha1_NnfNodeBlockStorageAllocationSpec(in *v1alpha3.NnfNodeBlockStorageAllocationSpec, out *NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeBlockStorageAllocationSpec_To_v1alpha1_NnfNodeBlockStorageAllocationSpec(in, out, s) +// Convert_v1alpha4_NnfNodeBlockStorageAllocationSpec_To_v1alpha1_NnfNodeBlockStorageAllocationSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeBlockStorageAllocationSpec_To_v1alpha1_NnfNodeBlockStorageAllocationSpec(in *v1alpha4.NnfNodeBlockStorageAllocationSpec, out *NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeBlockStorageAllocationSpec_To_v1alpha1_NnfNodeBlockStorageAllocationSpec(in, out, s) } -func autoConvert_v1alpha1_NnfNodeBlockStorageAllocationStatus_To_v1alpha3_NnfNodeBlockStorageAllocationStatus(in *NnfNodeBlockStorageAllocationStatus, out *v1alpha3.NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { - out.Accesses = *(*map[string]v1alpha3.NnfNodeBlockStorageAccessStatus)(unsafe.Pointer(&in.Accesses)) - out.Devices = *(*[]v1alpha3.NnfNodeBlockStorageDeviceStatus)(unsafe.Pointer(&in.Devices)) +func autoConvert_v1alpha1_NnfNodeBlockStorageAllocationStatus_To_v1alpha4_NnfNodeBlockStorageAllocationStatus(in *NnfNodeBlockStorageAllocationStatus, out *v1alpha4.NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { + out.Accesses = *(*map[string]v1alpha4.NnfNodeBlockStorageAccessStatus)(unsafe.Pointer(&in.Accesses)) + out.Devices = *(*[]v1alpha4.NnfNodeBlockStorageDeviceStatus)(unsafe.Pointer(&in.Devices)) out.CapacityAllocated = in.CapacityAllocated out.StoragePoolId = in.StoragePoolId return nil } -// Convert_v1alpha1_NnfNodeBlockStorageAllocationStatus_To_v1alpha3_NnfNodeBlockStorageAllocationStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeBlockStorageAllocationStatus_To_v1alpha3_NnfNodeBlockStorageAllocationStatus(in *NnfNodeBlockStorageAllocationStatus, out *v1alpha3.NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeBlockStorageAllocationStatus_To_v1alpha3_NnfNodeBlockStorageAllocationStatus(in, out, s) +// Convert_v1alpha1_NnfNodeBlockStorageAllocationStatus_To_v1alpha4_NnfNodeBlockStorageAllocationStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfNodeBlockStorageAllocationStatus_To_v1alpha4_NnfNodeBlockStorageAllocationStatus(in *NnfNodeBlockStorageAllocationStatus, out *v1alpha4.NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeBlockStorageAllocationStatus_To_v1alpha4_NnfNodeBlockStorageAllocationStatus(in, out, s) } -func autoConvert_v1alpha3_NnfNodeBlockStorageAllocationStatus_To_v1alpha1_NnfNodeBlockStorageAllocationStatus(in *v1alpha3.NnfNodeBlockStorageAllocationStatus, out *NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeBlockStorageAllocationStatus_To_v1alpha1_NnfNodeBlockStorageAllocationStatus(in *v1alpha4.NnfNodeBlockStorageAllocationStatus, out *NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { out.Accesses = *(*map[string]NnfNodeBlockStorageAccessStatus)(unsafe.Pointer(&in.Accesses)) out.Devices = *(*[]NnfNodeBlockStorageDeviceStatus)(unsafe.Pointer(&in.Devices)) out.CapacityAllocated = in.CapacityAllocated @@ -1827,93 +1827,93 @@ func autoConvert_v1alpha3_NnfNodeBlockStorageAllocationStatus_To_v1alpha1_NnfNod return nil } -// Convert_v1alpha3_NnfNodeBlockStorageAllocationStatus_To_v1alpha1_NnfNodeBlockStorageAllocationStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeBlockStorageAllocationStatus_To_v1alpha1_NnfNodeBlockStorageAllocationStatus(in *v1alpha3.NnfNodeBlockStorageAllocationStatus, out *NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeBlockStorageAllocationStatus_To_v1alpha1_NnfNodeBlockStorageAllocationStatus(in, out, s) +// Convert_v1alpha4_NnfNodeBlockStorageAllocationStatus_To_v1alpha1_NnfNodeBlockStorageAllocationStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeBlockStorageAllocationStatus_To_v1alpha1_NnfNodeBlockStorageAllocationStatus(in *v1alpha4.NnfNodeBlockStorageAllocationStatus, out *NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeBlockStorageAllocationStatus_To_v1alpha1_NnfNodeBlockStorageAllocationStatus(in, out, s) } -func autoConvert_v1alpha1_NnfNodeBlockStorageDeviceStatus_To_v1alpha3_NnfNodeBlockStorageDeviceStatus(in *NnfNodeBlockStorageDeviceStatus, out *v1alpha3.NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfNodeBlockStorageDeviceStatus_To_v1alpha4_NnfNodeBlockStorageDeviceStatus(in *NnfNodeBlockStorageDeviceStatus, out *v1alpha4.NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { out.NQN = in.NQN out.NamespaceId = in.NamespaceId out.CapacityAllocated = in.CapacityAllocated return nil } -// Convert_v1alpha1_NnfNodeBlockStorageDeviceStatus_To_v1alpha3_NnfNodeBlockStorageDeviceStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeBlockStorageDeviceStatus_To_v1alpha3_NnfNodeBlockStorageDeviceStatus(in *NnfNodeBlockStorageDeviceStatus, out *v1alpha3.NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeBlockStorageDeviceStatus_To_v1alpha3_NnfNodeBlockStorageDeviceStatus(in, out, s) +// Convert_v1alpha1_NnfNodeBlockStorageDeviceStatus_To_v1alpha4_NnfNodeBlockStorageDeviceStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfNodeBlockStorageDeviceStatus_To_v1alpha4_NnfNodeBlockStorageDeviceStatus(in *NnfNodeBlockStorageDeviceStatus, out *v1alpha4.NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeBlockStorageDeviceStatus_To_v1alpha4_NnfNodeBlockStorageDeviceStatus(in, out, s) } -func autoConvert_v1alpha3_NnfNodeBlockStorageDeviceStatus_To_v1alpha1_NnfNodeBlockStorageDeviceStatus(in *v1alpha3.NnfNodeBlockStorageDeviceStatus, out *NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeBlockStorageDeviceStatus_To_v1alpha1_NnfNodeBlockStorageDeviceStatus(in *v1alpha4.NnfNodeBlockStorageDeviceStatus, out *NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { out.NQN = in.NQN out.NamespaceId = in.NamespaceId out.CapacityAllocated = in.CapacityAllocated return nil } -// Convert_v1alpha3_NnfNodeBlockStorageDeviceStatus_To_v1alpha1_NnfNodeBlockStorageDeviceStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeBlockStorageDeviceStatus_To_v1alpha1_NnfNodeBlockStorageDeviceStatus(in *v1alpha3.NnfNodeBlockStorageDeviceStatus, out *NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeBlockStorageDeviceStatus_To_v1alpha1_NnfNodeBlockStorageDeviceStatus(in, out, s) +// Convert_v1alpha4_NnfNodeBlockStorageDeviceStatus_To_v1alpha1_NnfNodeBlockStorageDeviceStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeBlockStorageDeviceStatus_To_v1alpha1_NnfNodeBlockStorageDeviceStatus(in *v1alpha4.NnfNodeBlockStorageDeviceStatus, out *NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeBlockStorageDeviceStatus_To_v1alpha1_NnfNodeBlockStorageDeviceStatus(in, out, s) } -func autoConvert_v1alpha1_NnfNodeBlockStorageList_To_v1alpha3_NnfNodeBlockStorageList(in *NnfNodeBlockStorageList, out *v1alpha3.NnfNodeBlockStorageList, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfNodeBlockStorageList_To_v1alpha4_NnfNodeBlockStorageList(in *NnfNodeBlockStorageList, out *v1alpha4.NnfNodeBlockStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha3.NnfNodeBlockStorage)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]v1alpha4.NnfNodeBlockStorage)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha1_NnfNodeBlockStorageList_To_v1alpha3_NnfNodeBlockStorageList is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeBlockStorageList_To_v1alpha3_NnfNodeBlockStorageList(in *NnfNodeBlockStorageList, out *v1alpha3.NnfNodeBlockStorageList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeBlockStorageList_To_v1alpha3_NnfNodeBlockStorageList(in, out, s) +// Convert_v1alpha1_NnfNodeBlockStorageList_To_v1alpha4_NnfNodeBlockStorageList is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfNodeBlockStorageList_To_v1alpha4_NnfNodeBlockStorageList(in *NnfNodeBlockStorageList, out *v1alpha4.NnfNodeBlockStorageList, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeBlockStorageList_To_v1alpha4_NnfNodeBlockStorageList(in, out, s) } -func autoConvert_v1alpha3_NnfNodeBlockStorageList_To_v1alpha1_NnfNodeBlockStorageList(in *v1alpha3.NnfNodeBlockStorageList, out *NnfNodeBlockStorageList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeBlockStorageList_To_v1alpha1_NnfNodeBlockStorageList(in *v1alpha4.NnfNodeBlockStorageList, out *NnfNodeBlockStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]NnfNodeBlockStorage)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha3_NnfNodeBlockStorageList_To_v1alpha1_NnfNodeBlockStorageList is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeBlockStorageList_To_v1alpha1_NnfNodeBlockStorageList(in *v1alpha3.NnfNodeBlockStorageList, out *NnfNodeBlockStorageList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeBlockStorageList_To_v1alpha1_NnfNodeBlockStorageList(in, out, s) +// Convert_v1alpha4_NnfNodeBlockStorageList_To_v1alpha1_NnfNodeBlockStorageList is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeBlockStorageList_To_v1alpha1_NnfNodeBlockStorageList(in *v1alpha4.NnfNodeBlockStorageList, out *NnfNodeBlockStorageList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeBlockStorageList_To_v1alpha1_NnfNodeBlockStorageList(in, out, s) } -func autoConvert_v1alpha1_NnfNodeBlockStorageSpec_To_v1alpha3_NnfNodeBlockStorageSpec(in *NnfNodeBlockStorageSpec, out *v1alpha3.NnfNodeBlockStorageSpec, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfNodeBlockStorageSpec_To_v1alpha4_NnfNodeBlockStorageSpec(in *NnfNodeBlockStorageSpec, out *v1alpha4.NnfNodeBlockStorageSpec, s conversion.Scope) error { out.SharedAllocation = in.SharedAllocation - out.Allocations = *(*[]v1alpha3.NnfNodeBlockStorageAllocationSpec)(unsafe.Pointer(&in.Allocations)) + out.Allocations = *(*[]v1alpha4.NnfNodeBlockStorageAllocationSpec)(unsafe.Pointer(&in.Allocations)) return nil } -// Convert_v1alpha1_NnfNodeBlockStorageSpec_To_v1alpha3_NnfNodeBlockStorageSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeBlockStorageSpec_To_v1alpha3_NnfNodeBlockStorageSpec(in *NnfNodeBlockStorageSpec, out *v1alpha3.NnfNodeBlockStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeBlockStorageSpec_To_v1alpha3_NnfNodeBlockStorageSpec(in, out, s) +// Convert_v1alpha1_NnfNodeBlockStorageSpec_To_v1alpha4_NnfNodeBlockStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfNodeBlockStorageSpec_To_v1alpha4_NnfNodeBlockStorageSpec(in *NnfNodeBlockStorageSpec, out *v1alpha4.NnfNodeBlockStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeBlockStorageSpec_To_v1alpha4_NnfNodeBlockStorageSpec(in, out, s) } -func autoConvert_v1alpha3_NnfNodeBlockStorageSpec_To_v1alpha1_NnfNodeBlockStorageSpec(in *v1alpha3.NnfNodeBlockStorageSpec, out *NnfNodeBlockStorageSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeBlockStorageSpec_To_v1alpha1_NnfNodeBlockStorageSpec(in *v1alpha4.NnfNodeBlockStorageSpec, out *NnfNodeBlockStorageSpec, s conversion.Scope) error { out.SharedAllocation = in.SharedAllocation out.Allocations = *(*[]NnfNodeBlockStorageAllocationSpec)(unsafe.Pointer(&in.Allocations)) return nil } -// Convert_v1alpha3_NnfNodeBlockStorageSpec_To_v1alpha1_NnfNodeBlockStorageSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeBlockStorageSpec_To_v1alpha1_NnfNodeBlockStorageSpec(in *v1alpha3.NnfNodeBlockStorageSpec, out *NnfNodeBlockStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeBlockStorageSpec_To_v1alpha1_NnfNodeBlockStorageSpec(in, out, s) +// Convert_v1alpha4_NnfNodeBlockStorageSpec_To_v1alpha1_NnfNodeBlockStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeBlockStorageSpec_To_v1alpha1_NnfNodeBlockStorageSpec(in *v1alpha4.NnfNodeBlockStorageSpec, out *NnfNodeBlockStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeBlockStorageSpec_To_v1alpha1_NnfNodeBlockStorageSpec(in, out, s) } -func autoConvert_v1alpha1_NnfNodeBlockStorageStatus_To_v1alpha3_NnfNodeBlockStorageStatus(in *NnfNodeBlockStorageStatus, out *v1alpha3.NnfNodeBlockStorageStatus, s conversion.Scope) error { - out.Allocations = *(*[]v1alpha3.NnfNodeBlockStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) +func autoConvert_v1alpha1_NnfNodeBlockStorageStatus_To_v1alpha4_NnfNodeBlockStorageStatus(in *NnfNodeBlockStorageStatus, out *v1alpha4.NnfNodeBlockStorageStatus, s conversion.Scope) error { + out.Allocations = *(*[]v1alpha4.NnfNodeBlockStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) out.ResourceError = in.ResourceError out.PodStartTime = in.PodStartTime out.Ready = in.Ready return nil } -// Convert_v1alpha1_NnfNodeBlockStorageStatus_To_v1alpha3_NnfNodeBlockStorageStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeBlockStorageStatus_To_v1alpha3_NnfNodeBlockStorageStatus(in *NnfNodeBlockStorageStatus, out *v1alpha3.NnfNodeBlockStorageStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeBlockStorageStatus_To_v1alpha3_NnfNodeBlockStorageStatus(in, out, s) +// Convert_v1alpha1_NnfNodeBlockStorageStatus_To_v1alpha4_NnfNodeBlockStorageStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfNodeBlockStorageStatus_To_v1alpha4_NnfNodeBlockStorageStatus(in *NnfNodeBlockStorageStatus, out *v1alpha4.NnfNodeBlockStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeBlockStorageStatus_To_v1alpha4_NnfNodeBlockStorageStatus(in, out, s) } -func autoConvert_v1alpha3_NnfNodeBlockStorageStatus_To_v1alpha1_NnfNodeBlockStorageStatus(in *v1alpha3.NnfNodeBlockStorageStatus, out *NnfNodeBlockStorageStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeBlockStorageStatus_To_v1alpha1_NnfNodeBlockStorageStatus(in *v1alpha4.NnfNodeBlockStorageStatus, out *NnfNodeBlockStorageStatus, s conversion.Scope) error { out.Allocations = *(*[]NnfNodeBlockStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) out.ResourceError = in.ResourceError out.PodStartTime = in.PodStartTime @@ -1921,167 +1921,167 @@ func autoConvert_v1alpha3_NnfNodeBlockStorageStatus_To_v1alpha1_NnfNodeBlockStor return nil } -// Convert_v1alpha3_NnfNodeBlockStorageStatus_To_v1alpha1_NnfNodeBlockStorageStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeBlockStorageStatus_To_v1alpha1_NnfNodeBlockStorageStatus(in *v1alpha3.NnfNodeBlockStorageStatus, out *NnfNodeBlockStorageStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeBlockStorageStatus_To_v1alpha1_NnfNodeBlockStorageStatus(in, out, s) +// Convert_v1alpha4_NnfNodeBlockStorageStatus_To_v1alpha1_NnfNodeBlockStorageStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeBlockStorageStatus_To_v1alpha1_NnfNodeBlockStorageStatus(in *v1alpha4.NnfNodeBlockStorageStatus, out *NnfNodeBlockStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeBlockStorageStatus_To_v1alpha1_NnfNodeBlockStorageStatus(in, out, s) } -func autoConvert_v1alpha1_NnfNodeECData_To_v1alpha3_NnfNodeECData(in *NnfNodeECData, out *v1alpha3.NnfNodeECData, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfNodeECData_To_v1alpha4_NnfNodeECData(in *NnfNodeECData, out *v1alpha4.NnfNodeECData, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfNodeECDataSpec_To_v1alpha3_NnfNodeECDataSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha1_NnfNodeECDataSpec_To_v1alpha4_NnfNodeECDataSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha1_NnfNodeECDataStatus_To_v1alpha3_NnfNodeECDataStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha1_NnfNodeECDataStatus_To_v1alpha4_NnfNodeECDataStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha1_NnfNodeECData_To_v1alpha3_NnfNodeECData is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeECData_To_v1alpha3_NnfNodeECData(in *NnfNodeECData, out *v1alpha3.NnfNodeECData, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeECData_To_v1alpha3_NnfNodeECData(in, out, s) +// Convert_v1alpha1_NnfNodeECData_To_v1alpha4_NnfNodeECData is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfNodeECData_To_v1alpha4_NnfNodeECData(in *NnfNodeECData, out *v1alpha4.NnfNodeECData, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeECData_To_v1alpha4_NnfNodeECData(in, out, s) } -func autoConvert_v1alpha3_NnfNodeECData_To_v1alpha1_NnfNodeECData(in *v1alpha3.NnfNodeECData, out *NnfNodeECData, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeECData_To_v1alpha1_NnfNodeECData(in *v1alpha4.NnfNodeECData, out *NnfNodeECData, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfNodeECDataSpec_To_v1alpha1_NnfNodeECDataSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha4_NnfNodeECDataSpec_To_v1alpha1_NnfNodeECDataSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha3_NnfNodeECDataStatus_To_v1alpha1_NnfNodeECDataStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha4_NnfNodeECDataStatus_To_v1alpha1_NnfNodeECDataStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfNodeECData_To_v1alpha1_NnfNodeECData is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeECData_To_v1alpha1_NnfNodeECData(in *v1alpha3.NnfNodeECData, out *NnfNodeECData, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeECData_To_v1alpha1_NnfNodeECData(in, out, s) +// Convert_v1alpha4_NnfNodeECData_To_v1alpha1_NnfNodeECData is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeECData_To_v1alpha1_NnfNodeECData(in *v1alpha4.NnfNodeECData, out *NnfNodeECData, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeECData_To_v1alpha1_NnfNodeECData(in, out, s) } -func autoConvert_v1alpha1_NnfNodeECDataList_To_v1alpha3_NnfNodeECDataList(in *NnfNodeECDataList, out *v1alpha3.NnfNodeECDataList, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfNodeECDataList_To_v1alpha4_NnfNodeECDataList(in *NnfNodeECDataList, out *v1alpha4.NnfNodeECDataList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha3.NnfNodeECData)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]v1alpha4.NnfNodeECData)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha1_NnfNodeECDataList_To_v1alpha3_NnfNodeECDataList is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeECDataList_To_v1alpha3_NnfNodeECDataList(in *NnfNodeECDataList, out *v1alpha3.NnfNodeECDataList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeECDataList_To_v1alpha3_NnfNodeECDataList(in, out, s) +// Convert_v1alpha1_NnfNodeECDataList_To_v1alpha4_NnfNodeECDataList is an autogenerated conversion function. +func Convert_v1alpha1_NnfNodeECDataList_To_v1alpha4_NnfNodeECDataList(in *NnfNodeECDataList, out *v1alpha4.NnfNodeECDataList, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeECDataList_To_v1alpha4_NnfNodeECDataList(in, out, s) } -func autoConvert_v1alpha3_NnfNodeECDataList_To_v1alpha1_NnfNodeECDataList(in *v1alpha3.NnfNodeECDataList, out *NnfNodeECDataList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeECDataList_To_v1alpha1_NnfNodeECDataList(in *v1alpha4.NnfNodeECDataList, out *NnfNodeECDataList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]NnfNodeECData)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha3_NnfNodeECDataList_To_v1alpha1_NnfNodeECDataList is an autogenerated conversion function. 
-func Convert_v1alpha3_NnfNodeECDataList_To_v1alpha1_NnfNodeECDataList(in *v1alpha3.NnfNodeECDataList, out *NnfNodeECDataList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeECDataList_To_v1alpha1_NnfNodeECDataList(in, out, s) +// Convert_v1alpha4_NnfNodeECDataList_To_v1alpha1_NnfNodeECDataList is an autogenerated conversion function. +func Convert_v1alpha4_NnfNodeECDataList_To_v1alpha1_NnfNodeECDataList(in *v1alpha4.NnfNodeECDataList, out *NnfNodeECDataList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeECDataList_To_v1alpha1_NnfNodeECDataList(in, out, s) } -func autoConvert_v1alpha1_NnfNodeECDataSpec_To_v1alpha3_NnfNodeECDataSpec(in *NnfNodeECDataSpec, out *v1alpha3.NnfNodeECDataSpec, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfNodeECDataSpec_To_v1alpha4_NnfNodeECDataSpec(in *NnfNodeECDataSpec, out *v1alpha4.NnfNodeECDataSpec, s conversion.Scope) error { return nil } -// Convert_v1alpha1_NnfNodeECDataSpec_To_v1alpha3_NnfNodeECDataSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeECDataSpec_To_v1alpha3_NnfNodeECDataSpec(in *NnfNodeECDataSpec, out *v1alpha3.NnfNodeECDataSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeECDataSpec_To_v1alpha3_NnfNodeECDataSpec(in, out, s) +// Convert_v1alpha1_NnfNodeECDataSpec_To_v1alpha4_NnfNodeECDataSpec is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfNodeECDataSpec_To_v1alpha4_NnfNodeECDataSpec(in *NnfNodeECDataSpec, out *v1alpha4.NnfNodeECDataSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeECDataSpec_To_v1alpha4_NnfNodeECDataSpec(in, out, s) } -func autoConvert_v1alpha3_NnfNodeECDataSpec_To_v1alpha1_NnfNodeECDataSpec(in *v1alpha3.NnfNodeECDataSpec, out *NnfNodeECDataSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeECDataSpec_To_v1alpha1_NnfNodeECDataSpec(in *v1alpha4.NnfNodeECDataSpec, out *NnfNodeECDataSpec, s conversion.Scope) error { return nil } -// Convert_v1alpha3_NnfNodeECDataSpec_To_v1alpha1_NnfNodeECDataSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeECDataSpec_To_v1alpha1_NnfNodeECDataSpec(in *v1alpha3.NnfNodeECDataSpec, out *NnfNodeECDataSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeECDataSpec_To_v1alpha1_NnfNodeECDataSpec(in, out, s) +// Convert_v1alpha4_NnfNodeECDataSpec_To_v1alpha1_NnfNodeECDataSpec is an autogenerated conversion function. +func Convert_v1alpha4_NnfNodeECDataSpec_To_v1alpha1_NnfNodeECDataSpec(in *v1alpha4.NnfNodeECDataSpec, out *NnfNodeECDataSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeECDataSpec_To_v1alpha1_NnfNodeECDataSpec(in, out, s) } -func autoConvert_v1alpha1_NnfNodeECDataStatus_To_v1alpha3_NnfNodeECDataStatus(in *NnfNodeECDataStatus, out *v1alpha3.NnfNodeECDataStatus, s conversion.Scope) error { - out.Data = *(*map[string]v1alpha3.NnfNodeECPrivateData)(unsafe.Pointer(&in.Data)) +func autoConvert_v1alpha1_NnfNodeECDataStatus_To_v1alpha4_NnfNodeECDataStatus(in *NnfNodeECDataStatus, out *v1alpha4.NnfNodeECDataStatus, s conversion.Scope) error { + out.Data = *(*map[string]v1alpha4.NnfNodeECPrivateData)(unsafe.Pointer(&in.Data)) return nil } -// Convert_v1alpha1_NnfNodeECDataStatus_To_v1alpha3_NnfNodeECDataStatus is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfNodeECDataStatus_To_v1alpha3_NnfNodeECDataStatus(in *NnfNodeECDataStatus, out *v1alpha3.NnfNodeECDataStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeECDataStatus_To_v1alpha3_NnfNodeECDataStatus(in, out, s) +// Convert_v1alpha1_NnfNodeECDataStatus_To_v1alpha4_NnfNodeECDataStatus is an autogenerated conversion function. +func Convert_v1alpha1_NnfNodeECDataStatus_To_v1alpha4_NnfNodeECDataStatus(in *NnfNodeECDataStatus, out *v1alpha4.NnfNodeECDataStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeECDataStatus_To_v1alpha4_NnfNodeECDataStatus(in, out, s) } -func autoConvert_v1alpha3_NnfNodeECDataStatus_To_v1alpha1_NnfNodeECDataStatus(in *v1alpha3.NnfNodeECDataStatus, out *NnfNodeECDataStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeECDataStatus_To_v1alpha1_NnfNodeECDataStatus(in *v1alpha4.NnfNodeECDataStatus, out *NnfNodeECDataStatus, s conversion.Scope) error { out.Data = *(*map[string]NnfNodeECPrivateData)(unsafe.Pointer(&in.Data)) return nil } -// Convert_v1alpha3_NnfNodeECDataStatus_To_v1alpha1_NnfNodeECDataStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeECDataStatus_To_v1alpha1_NnfNodeECDataStatus(in *v1alpha3.NnfNodeECDataStatus, out *NnfNodeECDataStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeECDataStatus_To_v1alpha1_NnfNodeECDataStatus(in, out, s) +// Convert_v1alpha4_NnfNodeECDataStatus_To_v1alpha1_NnfNodeECDataStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeECDataStatus_To_v1alpha1_NnfNodeECDataStatus(in *v1alpha4.NnfNodeECDataStatus, out *NnfNodeECDataStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeECDataStatus_To_v1alpha1_NnfNodeECDataStatus(in, out, s) } -func autoConvert_v1alpha1_NnfNodeList_To_v1alpha3_NnfNodeList(in *NnfNodeList, out *v1alpha3.NnfNodeList, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfNodeList_To_v1alpha4_NnfNodeList(in *NnfNodeList, out *v1alpha4.NnfNodeList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha3.NnfNode)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]v1alpha4.NnfNode)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha1_NnfNodeList_To_v1alpha3_NnfNodeList is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeList_To_v1alpha3_NnfNodeList(in *NnfNodeList, out *v1alpha3.NnfNodeList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeList_To_v1alpha3_NnfNodeList(in, out, s) +// Convert_v1alpha1_NnfNodeList_To_v1alpha4_NnfNodeList is an autogenerated conversion function. +func Convert_v1alpha1_NnfNodeList_To_v1alpha4_NnfNodeList(in *NnfNodeList, out *v1alpha4.NnfNodeList, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeList_To_v1alpha4_NnfNodeList(in, out, s) } -func autoConvert_v1alpha3_NnfNodeList_To_v1alpha1_NnfNodeList(in *v1alpha3.NnfNodeList, out *NnfNodeList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeList_To_v1alpha1_NnfNodeList(in *v1alpha4.NnfNodeList, out *NnfNodeList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]NnfNode)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha3_NnfNodeList_To_v1alpha1_NnfNodeList is an autogenerated conversion function. 
-func Convert_v1alpha3_NnfNodeList_To_v1alpha1_NnfNodeList(in *v1alpha3.NnfNodeList, out *NnfNodeList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeList_To_v1alpha1_NnfNodeList(in, out, s) +// Convert_v1alpha4_NnfNodeList_To_v1alpha1_NnfNodeList is an autogenerated conversion function. +func Convert_v1alpha4_NnfNodeList_To_v1alpha1_NnfNodeList(in *v1alpha4.NnfNodeList, out *NnfNodeList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeList_To_v1alpha1_NnfNodeList(in, out, s) } -func autoConvert_v1alpha1_NnfNodeSpec_To_v1alpha3_NnfNodeSpec(in *NnfNodeSpec, out *v1alpha3.NnfNodeSpec, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfNodeSpec_To_v1alpha4_NnfNodeSpec(in *NnfNodeSpec, out *v1alpha4.NnfNodeSpec, s conversion.Scope) error { out.Name = in.Name out.Pod = in.Pod - out.State = v1alpha3.NnfResourceStateType(in.State) + out.State = v1alpha4.NnfResourceStateType(in.State) return nil } -// Convert_v1alpha1_NnfNodeSpec_To_v1alpha3_NnfNodeSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeSpec_To_v1alpha3_NnfNodeSpec(in *NnfNodeSpec, out *v1alpha3.NnfNodeSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeSpec_To_v1alpha3_NnfNodeSpec(in, out, s) +// Convert_v1alpha1_NnfNodeSpec_To_v1alpha4_NnfNodeSpec is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfNodeSpec_To_v1alpha4_NnfNodeSpec(in *NnfNodeSpec, out *v1alpha4.NnfNodeSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeSpec_To_v1alpha4_NnfNodeSpec(in, out, s) } -func autoConvert_v1alpha3_NnfNodeSpec_To_v1alpha1_NnfNodeSpec(in *v1alpha3.NnfNodeSpec, out *NnfNodeSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeSpec_To_v1alpha1_NnfNodeSpec(in *v1alpha4.NnfNodeSpec, out *NnfNodeSpec, s conversion.Scope) error { out.Name = in.Name out.Pod = in.Pod out.State = NnfResourceStateType(in.State) return nil } -// Convert_v1alpha3_NnfNodeSpec_To_v1alpha1_NnfNodeSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeSpec_To_v1alpha1_NnfNodeSpec(in *v1alpha3.NnfNodeSpec, out *NnfNodeSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeSpec_To_v1alpha1_NnfNodeSpec(in, out, s) +// Convert_v1alpha4_NnfNodeSpec_To_v1alpha1_NnfNodeSpec is an autogenerated conversion function. +func Convert_v1alpha4_NnfNodeSpec_To_v1alpha1_NnfNodeSpec(in *v1alpha4.NnfNodeSpec, out *NnfNodeSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeSpec_To_v1alpha1_NnfNodeSpec(in, out, s) } -func autoConvert_v1alpha1_NnfNodeStatus_To_v1alpha3_NnfNodeStatus(in *NnfNodeStatus, out *v1alpha3.NnfNodeStatus, s conversion.Scope) error { - out.Status = v1alpha3.NnfResourceStatusType(in.Status) - out.Health = v1alpha3.NnfResourceHealthType(in.Health) +func autoConvert_v1alpha1_NnfNodeStatus_To_v1alpha4_NnfNodeStatus(in *NnfNodeStatus, out *v1alpha4.NnfNodeStatus, s conversion.Scope) error { + out.Status = v1alpha4.NnfResourceStatusType(in.Status) + out.Health = v1alpha4.NnfResourceHealthType(in.Health) out.Fenced = in.Fenced out.LNetNid = in.LNetNid out.Capacity = in.Capacity out.CapacityAllocated = in.CapacityAllocated - out.Servers = *(*[]v1alpha3.NnfServerStatus)(unsafe.Pointer(&in.Servers)) - out.Drives = *(*[]v1alpha3.NnfDriveStatus)(unsafe.Pointer(&in.Drives)) + out.Servers = 
*(*[]v1alpha4.NnfServerStatus)(unsafe.Pointer(&in.Servers)) + out.Drives = *(*[]v1alpha4.NnfDriveStatus)(unsafe.Pointer(&in.Drives)) return nil } -// Convert_v1alpha1_NnfNodeStatus_To_v1alpha3_NnfNodeStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeStatus_To_v1alpha3_NnfNodeStatus(in *NnfNodeStatus, out *v1alpha3.NnfNodeStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeStatus_To_v1alpha3_NnfNodeStatus(in, out, s) +// Convert_v1alpha1_NnfNodeStatus_To_v1alpha4_NnfNodeStatus is an autogenerated conversion function. +func Convert_v1alpha1_NnfNodeStatus_To_v1alpha4_NnfNodeStatus(in *NnfNodeStatus, out *v1alpha4.NnfNodeStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeStatus_To_v1alpha4_NnfNodeStatus(in, out, s) } -func autoConvert_v1alpha3_NnfNodeStatus_To_v1alpha1_NnfNodeStatus(in *v1alpha3.NnfNodeStatus, out *NnfNodeStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeStatus_To_v1alpha1_NnfNodeStatus(in *v1alpha4.NnfNodeStatus, out *NnfNodeStatus, s conversion.Scope) error { out.Status = NnfResourceStatusType(in.Status) out.Health = NnfResourceHealthType(in.Health) out.Fenced = in.Fenced @@ -2093,219 +2093,219 @@ func autoConvert_v1alpha3_NnfNodeStatus_To_v1alpha1_NnfNodeStatus(in *v1alpha3.N return nil } -// Convert_v1alpha3_NnfNodeStatus_To_v1alpha1_NnfNodeStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeStatus_To_v1alpha1_NnfNodeStatus(in *v1alpha3.NnfNodeStatus, out *NnfNodeStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeStatus_To_v1alpha1_NnfNodeStatus(in, out, s) +// Convert_v1alpha4_NnfNodeStatus_To_v1alpha1_NnfNodeStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeStatus_To_v1alpha1_NnfNodeStatus(in *v1alpha4.NnfNodeStatus, out *NnfNodeStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeStatus_To_v1alpha1_NnfNodeStatus(in, out, s) } -func autoConvert_v1alpha1_NnfNodeStorage_To_v1alpha3_NnfNodeStorage(in *NnfNodeStorage, out *v1alpha3.NnfNodeStorage, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfNodeStorage_To_v1alpha4_NnfNodeStorage(in *NnfNodeStorage, out *v1alpha4.NnfNodeStorage, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfNodeStorageSpec_To_v1alpha3_NnfNodeStorageSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha1_NnfNodeStorageSpec_To_v1alpha4_NnfNodeStorageSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha1_NnfNodeStorageStatus_To_v1alpha3_NnfNodeStorageStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha1_NnfNodeStorageStatus_To_v1alpha4_NnfNodeStorageStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha1_NnfNodeStorage_To_v1alpha3_NnfNodeStorage is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeStorage_To_v1alpha3_NnfNodeStorage(in *NnfNodeStorage, out *v1alpha3.NnfNodeStorage, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeStorage_To_v1alpha3_NnfNodeStorage(in, out, s) +// Convert_v1alpha1_NnfNodeStorage_To_v1alpha4_NnfNodeStorage is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfNodeStorage_To_v1alpha4_NnfNodeStorage(in *NnfNodeStorage, out *v1alpha4.NnfNodeStorage, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeStorage_To_v1alpha4_NnfNodeStorage(in, out, s) } -func autoConvert_v1alpha3_NnfNodeStorage_To_v1alpha1_NnfNodeStorage(in *v1alpha3.NnfNodeStorage, out *NnfNodeStorage, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeStorage_To_v1alpha1_NnfNodeStorage(in *v1alpha4.NnfNodeStorage, out *NnfNodeStorage, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfNodeStorageSpec_To_v1alpha1_NnfNodeStorageSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha4_NnfNodeStorageSpec_To_v1alpha1_NnfNodeStorageSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha3_NnfNodeStorageStatus_To_v1alpha1_NnfNodeStorageStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha4_NnfNodeStorageStatus_To_v1alpha1_NnfNodeStorageStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfNodeStorage_To_v1alpha1_NnfNodeStorage is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeStorage_To_v1alpha1_NnfNodeStorage(in *v1alpha3.NnfNodeStorage, out *NnfNodeStorage, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeStorage_To_v1alpha1_NnfNodeStorage(in, out, s) +// Convert_v1alpha4_NnfNodeStorage_To_v1alpha1_NnfNodeStorage is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeStorage_To_v1alpha1_NnfNodeStorage(in *v1alpha4.NnfNodeStorage, out *NnfNodeStorage, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeStorage_To_v1alpha1_NnfNodeStorage(in, out, s) } -func autoConvert_v1alpha1_NnfNodeStorageAllocationStatus_To_v1alpha3_NnfNodeStorageAllocationStatus(in *NnfNodeStorageAllocationStatus, out *v1alpha3.NnfNodeStorageAllocationStatus, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfNodeStorageAllocationStatus_To_v1alpha4_NnfNodeStorageAllocationStatus(in *NnfNodeStorageAllocationStatus, out *v1alpha4.NnfNodeStorageAllocationStatus, s conversion.Scope) error { out.VolumeGroup = in.VolumeGroup out.LogicalVolume = in.LogicalVolume out.Ready = in.Ready return nil } -// Convert_v1alpha1_NnfNodeStorageAllocationStatus_To_v1alpha3_NnfNodeStorageAllocationStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeStorageAllocationStatus_To_v1alpha3_NnfNodeStorageAllocationStatus(in *NnfNodeStorageAllocationStatus, out *v1alpha3.NnfNodeStorageAllocationStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeStorageAllocationStatus_To_v1alpha3_NnfNodeStorageAllocationStatus(in, out, s) +// Convert_v1alpha1_NnfNodeStorageAllocationStatus_To_v1alpha4_NnfNodeStorageAllocationStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfNodeStorageAllocationStatus_To_v1alpha4_NnfNodeStorageAllocationStatus(in *NnfNodeStorageAllocationStatus, out *v1alpha4.NnfNodeStorageAllocationStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeStorageAllocationStatus_To_v1alpha4_NnfNodeStorageAllocationStatus(in, out, s) } -func autoConvert_v1alpha3_NnfNodeStorageAllocationStatus_To_v1alpha1_NnfNodeStorageAllocationStatus(in *v1alpha3.NnfNodeStorageAllocationStatus, out *NnfNodeStorageAllocationStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeStorageAllocationStatus_To_v1alpha1_NnfNodeStorageAllocationStatus(in *v1alpha4.NnfNodeStorageAllocationStatus, out *NnfNodeStorageAllocationStatus, s conversion.Scope) error { out.VolumeGroup = in.VolumeGroup out.LogicalVolume = in.LogicalVolume out.Ready = in.Ready return nil } -// Convert_v1alpha3_NnfNodeStorageAllocationStatus_To_v1alpha1_NnfNodeStorageAllocationStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeStorageAllocationStatus_To_v1alpha1_NnfNodeStorageAllocationStatus(in *v1alpha3.NnfNodeStorageAllocationStatus, out *NnfNodeStorageAllocationStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeStorageAllocationStatus_To_v1alpha1_NnfNodeStorageAllocationStatus(in, out, s) +// Convert_v1alpha4_NnfNodeStorageAllocationStatus_To_v1alpha1_NnfNodeStorageAllocationStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeStorageAllocationStatus_To_v1alpha1_NnfNodeStorageAllocationStatus(in *v1alpha4.NnfNodeStorageAllocationStatus, out *NnfNodeStorageAllocationStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeStorageAllocationStatus_To_v1alpha1_NnfNodeStorageAllocationStatus(in, out, s) } -func autoConvert_v1alpha1_NnfNodeStorageList_To_v1alpha3_NnfNodeStorageList(in *NnfNodeStorageList, out *v1alpha3.NnfNodeStorageList, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfNodeStorageList_To_v1alpha4_NnfNodeStorageList(in *NnfNodeStorageList, out *v1alpha4.NnfNodeStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha3.NnfNodeStorage)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]v1alpha4.NnfNodeStorage)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha1_NnfNodeStorageList_To_v1alpha3_NnfNodeStorageList is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeStorageList_To_v1alpha3_NnfNodeStorageList(in *NnfNodeStorageList, out *v1alpha3.NnfNodeStorageList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeStorageList_To_v1alpha3_NnfNodeStorageList(in, out, s) +// Convert_v1alpha1_NnfNodeStorageList_To_v1alpha4_NnfNodeStorageList is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfNodeStorageList_To_v1alpha4_NnfNodeStorageList(in *NnfNodeStorageList, out *v1alpha4.NnfNodeStorageList, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeStorageList_To_v1alpha4_NnfNodeStorageList(in, out, s) } -func autoConvert_v1alpha3_NnfNodeStorageList_To_v1alpha1_NnfNodeStorageList(in *v1alpha3.NnfNodeStorageList, out *NnfNodeStorageList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeStorageList_To_v1alpha1_NnfNodeStorageList(in *v1alpha4.NnfNodeStorageList, out *NnfNodeStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]NnfNodeStorage)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha3_NnfNodeStorageList_To_v1alpha1_NnfNodeStorageList is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeStorageList_To_v1alpha1_NnfNodeStorageList(in *v1alpha3.NnfNodeStorageList, out *NnfNodeStorageList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeStorageList_To_v1alpha1_NnfNodeStorageList(in, out, s) +// Convert_v1alpha4_NnfNodeStorageList_To_v1alpha1_NnfNodeStorageList is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeStorageList_To_v1alpha1_NnfNodeStorageList(in *v1alpha4.NnfNodeStorageList, out *NnfNodeStorageList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeStorageList_To_v1alpha1_NnfNodeStorageList(in, out, s) } -func autoConvert_v1alpha1_NnfNodeStorageSpec_To_v1alpha3_NnfNodeStorageSpec(in *NnfNodeStorageSpec, out *v1alpha3.NnfNodeStorageSpec, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfNodeStorageSpec_To_v1alpha4_NnfNodeStorageSpec(in *NnfNodeStorageSpec, out *v1alpha4.NnfNodeStorageSpec, s conversion.Scope) error { out.Count = in.Count out.SharedAllocation = in.SharedAllocation out.Capacity = in.Capacity out.UserID = in.UserID out.GroupID = in.GroupID out.FileSystemType = in.FileSystemType - if err := Convert_v1alpha1_LustreStorageSpec_To_v1alpha3_LustreStorageSpec(&in.LustreStorage, &out.LustreStorage, s); err != nil { + if err := Convert_v1alpha1_LustreStorageSpec_To_v1alpha4_LustreStorageSpec(&in.LustreStorage, &out.LustreStorage, s); err != nil { return err } out.BlockReference = in.BlockReference return nil } -// Convert_v1alpha1_NnfNodeStorageSpec_To_v1alpha3_NnfNodeStorageSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeStorageSpec_To_v1alpha3_NnfNodeStorageSpec(in *NnfNodeStorageSpec, out *v1alpha3.NnfNodeStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeStorageSpec_To_v1alpha3_NnfNodeStorageSpec(in, out, s) +// Convert_v1alpha1_NnfNodeStorageSpec_To_v1alpha4_NnfNodeStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfNodeStorageSpec_To_v1alpha4_NnfNodeStorageSpec(in *NnfNodeStorageSpec, out *v1alpha4.NnfNodeStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeStorageSpec_To_v1alpha4_NnfNodeStorageSpec(in, out, s) } -func autoConvert_v1alpha3_NnfNodeStorageSpec_To_v1alpha1_NnfNodeStorageSpec(in *v1alpha3.NnfNodeStorageSpec, out *NnfNodeStorageSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeStorageSpec_To_v1alpha1_NnfNodeStorageSpec(in *v1alpha4.NnfNodeStorageSpec, out *NnfNodeStorageSpec, s conversion.Scope) error { out.Count = in.Count out.SharedAllocation = in.SharedAllocation out.Capacity = in.Capacity out.UserID = in.UserID out.GroupID = in.GroupID out.FileSystemType = in.FileSystemType - if err := Convert_v1alpha3_LustreStorageSpec_To_v1alpha1_LustreStorageSpec(&in.LustreStorage, &out.LustreStorage, s); err != nil { + if err := Convert_v1alpha4_LustreStorageSpec_To_v1alpha1_LustreStorageSpec(&in.LustreStorage, &out.LustreStorage, s); err != nil { return err } out.BlockReference = in.BlockReference return nil } -// Convert_v1alpha3_NnfNodeStorageSpec_To_v1alpha1_NnfNodeStorageSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeStorageSpec_To_v1alpha1_NnfNodeStorageSpec(in *v1alpha3.NnfNodeStorageSpec, out *NnfNodeStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeStorageSpec_To_v1alpha1_NnfNodeStorageSpec(in, out, s) +// Convert_v1alpha4_NnfNodeStorageSpec_To_v1alpha1_NnfNodeStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeStorageSpec_To_v1alpha1_NnfNodeStorageSpec(in *v1alpha4.NnfNodeStorageSpec, out *NnfNodeStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeStorageSpec_To_v1alpha1_NnfNodeStorageSpec(in, out, s) } -func autoConvert_v1alpha1_NnfNodeStorageStatus_To_v1alpha3_NnfNodeStorageStatus(in *NnfNodeStorageStatus, out *v1alpha3.NnfNodeStorageStatus, s conversion.Scope) error { - out.Allocations = *(*[]v1alpha3.NnfNodeStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) +func autoConvert_v1alpha1_NnfNodeStorageStatus_To_v1alpha4_NnfNodeStorageStatus(in *NnfNodeStorageStatus, out *v1alpha4.NnfNodeStorageStatus, s conversion.Scope) error { + out.Allocations = *(*[]v1alpha4.NnfNodeStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) out.Ready = in.Ready out.ResourceError = in.ResourceError return nil } -// Convert_v1alpha1_NnfNodeStorageStatus_To_v1alpha3_NnfNodeStorageStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeStorageStatus_To_v1alpha3_NnfNodeStorageStatus(in *NnfNodeStorageStatus, out *v1alpha3.NnfNodeStorageStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeStorageStatus_To_v1alpha3_NnfNodeStorageStatus(in, out, s) +// Convert_v1alpha1_NnfNodeStorageStatus_To_v1alpha4_NnfNodeStorageStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfNodeStorageStatus_To_v1alpha4_NnfNodeStorageStatus(in *NnfNodeStorageStatus, out *v1alpha4.NnfNodeStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfNodeStorageStatus_To_v1alpha4_NnfNodeStorageStatus(in, out, s) } -func autoConvert_v1alpha3_NnfNodeStorageStatus_To_v1alpha1_NnfNodeStorageStatus(in *v1alpha3.NnfNodeStorageStatus, out *NnfNodeStorageStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeStorageStatus_To_v1alpha1_NnfNodeStorageStatus(in *v1alpha4.NnfNodeStorageStatus, out *NnfNodeStorageStatus, s conversion.Scope) error { out.Allocations = *(*[]NnfNodeStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) out.Ready = in.Ready out.ResourceError = in.ResourceError return nil } -// Convert_v1alpha3_NnfNodeStorageStatus_To_v1alpha1_NnfNodeStorageStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeStorageStatus_To_v1alpha1_NnfNodeStorageStatus(in *v1alpha3.NnfNodeStorageStatus, out *NnfNodeStorageStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeStorageStatus_To_v1alpha1_NnfNodeStorageStatus(in, out, s) +// Convert_v1alpha4_NnfNodeStorageStatus_To_v1alpha1_NnfNodeStorageStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeStorageStatus_To_v1alpha1_NnfNodeStorageStatus(in *v1alpha4.NnfNodeStorageStatus, out *NnfNodeStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeStorageStatus_To_v1alpha1_NnfNodeStorageStatus(in, out, s) } -func autoConvert_v1alpha1_NnfPortManager_To_v1alpha3_NnfPortManager(in *NnfPortManager, out *v1alpha3.NnfPortManager, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfPortManager_To_v1alpha4_NnfPortManager(in *NnfPortManager, out *v1alpha4.NnfPortManager, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfPortManagerSpec_To_v1alpha3_NnfPortManagerSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha1_NnfPortManagerSpec_To_v1alpha4_NnfPortManagerSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha1_NnfPortManagerStatus_To_v1alpha3_NnfPortManagerStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha1_NnfPortManagerStatus_To_v1alpha4_NnfPortManagerStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha1_NnfPortManager_To_v1alpha3_NnfPortManager is an autogenerated conversion function. -func Convert_v1alpha1_NnfPortManager_To_v1alpha3_NnfPortManager(in *NnfPortManager, out *v1alpha3.NnfPortManager, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfPortManager_To_v1alpha3_NnfPortManager(in, out, s) +// Convert_v1alpha1_NnfPortManager_To_v1alpha4_NnfPortManager is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfPortManager_To_v1alpha4_NnfPortManager(in *NnfPortManager, out *v1alpha4.NnfPortManager, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfPortManager_To_v1alpha4_NnfPortManager(in, out, s) } -func autoConvert_v1alpha3_NnfPortManager_To_v1alpha1_NnfPortManager(in *v1alpha3.NnfPortManager, out *NnfPortManager, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfPortManager_To_v1alpha1_NnfPortManager(in *v1alpha4.NnfPortManager, out *NnfPortManager, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfPortManagerSpec_To_v1alpha1_NnfPortManagerSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha4_NnfPortManagerSpec_To_v1alpha1_NnfPortManagerSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha3_NnfPortManagerStatus_To_v1alpha1_NnfPortManagerStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha4_NnfPortManagerStatus_To_v1alpha1_NnfPortManagerStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfPortManager_To_v1alpha1_NnfPortManager is an autogenerated conversion function. -func Convert_v1alpha3_NnfPortManager_To_v1alpha1_NnfPortManager(in *v1alpha3.NnfPortManager, out *NnfPortManager, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfPortManager_To_v1alpha1_NnfPortManager(in, out, s) +// Convert_v1alpha4_NnfPortManager_To_v1alpha1_NnfPortManager is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfPortManager_To_v1alpha1_NnfPortManager(in *v1alpha4.NnfPortManager, out *NnfPortManager, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfPortManager_To_v1alpha1_NnfPortManager(in, out, s) } -func autoConvert_v1alpha1_NnfPortManagerAllocationSpec_To_v1alpha3_NnfPortManagerAllocationSpec(in *NnfPortManagerAllocationSpec, out *v1alpha3.NnfPortManagerAllocationSpec, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfPortManagerAllocationSpec_To_v1alpha4_NnfPortManagerAllocationSpec(in *NnfPortManagerAllocationSpec, out *v1alpha4.NnfPortManagerAllocationSpec, s conversion.Scope) error { out.Requester = in.Requester out.Count = in.Count return nil } -// Convert_v1alpha1_NnfPortManagerAllocationSpec_To_v1alpha3_NnfPortManagerAllocationSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfPortManagerAllocationSpec_To_v1alpha3_NnfPortManagerAllocationSpec(in *NnfPortManagerAllocationSpec, out *v1alpha3.NnfPortManagerAllocationSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfPortManagerAllocationSpec_To_v1alpha3_NnfPortManagerAllocationSpec(in, out, s) +// Convert_v1alpha1_NnfPortManagerAllocationSpec_To_v1alpha4_NnfPortManagerAllocationSpec is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfPortManagerAllocationSpec_To_v1alpha4_NnfPortManagerAllocationSpec(in *NnfPortManagerAllocationSpec, out *v1alpha4.NnfPortManagerAllocationSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfPortManagerAllocationSpec_To_v1alpha4_NnfPortManagerAllocationSpec(in, out, s) } -func autoConvert_v1alpha3_NnfPortManagerAllocationSpec_To_v1alpha1_NnfPortManagerAllocationSpec(in *v1alpha3.NnfPortManagerAllocationSpec, out *NnfPortManagerAllocationSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfPortManagerAllocationSpec_To_v1alpha1_NnfPortManagerAllocationSpec(in *v1alpha4.NnfPortManagerAllocationSpec, out *NnfPortManagerAllocationSpec, s conversion.Scope) error { out.Requester = in.Requester out.Count = in.Count return nil } -// Convert_v1alpha3_NnfPortManagerAllocationSpec_To_v1alpha1_NnfPortManagerAllocationSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfPortManagerAllocationSpec_To_v1alpha1_NnfPortManagerAllocationSpec(in *v1alpha3.NnfPortManagerAllocationSpec, out *NnfPortManagerAllocationSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfPortManagerAllocationSpec_To_v1alpha1_NnfPortManagerAllocationSpec(in, out, s) +// Convert_v1alpha4_NnfPortManagerAllocationSpec_To_v1alpha1_NnfPortManagerAllocationSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfPortManagerAllocationSpec_To_v1alpha1_NnfPortManagerAllocationSpec(in *v1alpha4.NnfPortManagerAllocationSpec, out *NnfPortManagerAllocationSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfPortManagerAllocationSpec_To_v1alpha1_NnfPortManagerAllocationSpec(in, out, s) } -func autoConvert_v1alpha1_NnfPortManagerAllocationStatus_To_v1alpha3_NnfPortManagerAllocationStatus(in *NnfPortManagerAllocationStatus, out *v1alpha3.NnfPortManagerAllocationStatus, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfPortManagerAllocationStatus_To_v1alpha4_NnfPortManagerAllocationStatus(in *NnfPortManagerAllocationStatus, out *v1alpha4.NnfPortManagerAllocationStatus, s conversion.Scope) error { out.Requester = (*v1.ObjectReference)(unsafe.Pointer(in.Requester)) out.Ports = *(*[]uint16)(unsafe.Pointer(&in.Ports)) - out.Status = v1alpha3.NnfPortManagerAllocationStatusStatus(in.Status) + out.Status = v1alpha4.NnfPortManagerAllocationStatusStatus(in.Status) out.TimeUnallocated = (*metav1.Time)(unsafe.Pointer(in.TimeUnallocated)) return nil } -// Convert_v1alpha1_NnfPortManagerAllocationStatus_To_v1alpha3_NnfPortManagerAllocationStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfPortManagerAllocationStatus_To_v1alpha3_NnfPortManagerAllocationStatus(in *NnfPortManagerAllocationStatus, out *v1alpha3.NnfPortManagerAllocationStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfPortManagerAllocationStatus_To_v1alpha3_NnfPortManagerAllocationStatus(in, out, s) +// Convert_v1alpha1_NnfPortManagerAllocationStatus_To_v1alpha4_NnfPortManagerAllocationStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfPortManagerAllocationStatus_To_v1alpha4_NnfPortManagerAllocationStatus(in *NnfPortManagerAllocationStatus, out *v1alpha4.NnfPortManagerAllocationStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfPortManagerAllocationStatus_To_v1alpha4_NnfPortManagerAllocationStatus(in, out, s) } -func autoConvert_v1alpha3_NnfPortManagerAllocationStatus_To_v1alpha1_NnfPortManagerAllocationStatus(in *v1alpha3.NnfPortManagerAllocationStatus, out *NnfPortManagerAllocationStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfPortManagerAllocationStatus_To_v1alpha1_NnfPortManagerAllocationStatus(in *v1alpha4.NnfPortManagerAllocationStatus, out *NnfPortManagerAllocationStatus, s conversion.Scope) error { out.Requester = (*v1.ObjectReference)(unsafe.Pointer(in.Requester)) out.Ports = *(*[]uint16)(unsafe.Pointer(&in.Ports)) out.Status = NnfPortManagerAllocationStatusStatus(in.Status) @@ -2313,91 +2313,91 @@ func autoConvert_v1alpha3_NnfPortManagerAllocationStatus_To_v1alpha1_NnfPortMana return nil } -// Convert_v1alpha3_NnfPortManagerAllocationStatus_To_v1alpha1_NnfPortManagerAllocationStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfPortManagerAllocationStatus_To_v1alpha1_NnfPortManagerAllocationStatus(in *v1alpha3.NnfPortManagerAllocationStatus, out *NnfPortManagerAllocationStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfPortManagerAllocationStatus_To_v1alpha1_NnfPortManagerAllocationStatus(in, out, s) +// Convert_v1alpha4_NnfPortManagerAllocationStatus_To_v1alpha1_NnfPortManagerAllocationStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfPortManagerAllocationStatus_To_v1alpha1_NnfPortManagerAllocationStatus(in *v1alpha4.NnfPortManagerAllocationStatus, out *NnfPortManagerAllocationStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfPortManagerAllocationStatus_To_v1alpha1_NnfPortManagerAllocationStatus(in, out, s) } -func autoConvert_v1alpha1_NnfPortManagerList_To_v1alpha3_NnfPortManagerList(in *NnfPortManagerList, out *v1alpha3.NnfPortManagerList, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfPortManagerList_To_v1alpha4_NnfPortManagerList(in *NnfPortManagerList, out *v1alpha4.NnfPortManagerList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha3.NnfPortManager)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]v1alpha4.NnfPortManager)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha1_NnfPortManagerList_To_v1alpha3_NnfPortManagerList is an autogenerated conversion function. -func Convert_v1alpha1_NnfPortManagerList_To_v1alpha3_NnfPortManagerList(in *NnfPortManagerList, out *v1alpha3.NnfPortManagerList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfPortManagerList_To_v1alpha3_NnfPortManagerList(in, out, s) +// Convert_v1alpha1_NnfPortManagerList_To_v1alpha4_NnfPortManagerList is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfPortManagerList_To_v1alpha4_NnfPortManagerList(in *NnfPortManagerList, out *v1alpha4.NnfPortManagerList, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfPortManagerList_To_v1alpha4_NnfPortManagerList(in, out, s) } -func autoConvert_v1alpha3_NnfPortManagerList_To_v1alpha1_NnfPortManagerList(in *v1alpha3.NnfPortManagerList, out *NnfPortManagerList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfPortManagerList_To_v1alpha1_NnfPortManagerList(in *v1alpha4.NnfPortManagerList, out *NnfPortManagerList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]NnfPortManager)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha3_NnfPortManagerList_To_v1alpha1_NnfPortManagerList is an autogenerated conversion function. -func Convert_v1alpha3_NnfPortManagerList_To_v1alpha1_NnfPortManagerList(in *v1alpha3.NnfPortManagerList, out *NnfPortManagerList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfPortManagerList_To_v1alpha1_NnfPortManagerList(in, out, s) +// Convert_v1alpha4_NnfPortManagerList_To_v1alpha1_NnfPortManagerList is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfPortManagerList_To_v1alpha1_NnfPortManagerList(in *v1alpha4.NnfPortManagerList, out *NnfPortManagerList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfPortManagerList_To_v1alpha1_NnfPortManagerList(in, out, s) } -func autoConvert_v1alpha1_NnfPortManagerSpec_To_v1alpha3_NnfPortManagerSpec(in *NnfPortManagerSpec, out *v1alpha3.NnfPortManagerSpec, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfPortManagerSpec_To_v1alpha4_NnfPortManagerSpec(in *NnfPortManagerSpec, out *v1alpha4.NnfPortManagerSpec, s conversion.Scope) error { out.SystemConfiguration = in.SystemConfiguration - out.Allocations = *(*[]v1alpha3.NnfPortManagerAllocationSpec)(unsafe.Pointer(&in.Allocations)) + out.Allocations = *(*[]v1alpha4.NnfPortManagerAllocationSpec)(unsafe.Pointer(&in.Allocations)) return nil } -// Convert_v1alpha1_NnfPortManagerSpec_To_v1alpha3_NnfPortManagerSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfPortManagerSpec_To_v1alpha3_NnfPortManagerSpec(in *NnfPortManagerSpec, out *v1alpha3.NnfPortManagerSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfPortManagerSpec_To_v1alpha3_NnfPortManagerSpec(in, out, s) +// Convert_v1alpha1_NnfPortManagerSpec_To_v1alpha4_NnfPortManagerSpec is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfPortManagerSpec_To_v1alpha4_NnfPortManagerSpec(in *NnfPortManagerSpec, out *v1alpha4.NnfPortManagerSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfPortManagerSpec_To_v1alpha4_NnfPortManagerSpec(in, out, s) } -func autoConvert_v1alpha3_NnfPortManagerSpec_To_v1alpha1_NnfPortManagerSpec(in *v1alpha3.NnfPortManagerSpec, out *NnfPortManagerSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfPortManagerSpec_To_v1alpha1_NnfPortManagerSpec(in *v1alpha4.NnfPortManagerSpec, out *NnfPortManagerSpec, s conversion.Scope) error { out.SystemConfiguration = in.SystemConfiguration out.Allocations = *(*[]NnfPortManagerAllocationSpec)(unsafe.Pointer(&in.Allocations)) return nil } -// Convert_v1alpha3_NnfPortManagerSpec_To_v1alpha1_NnfPortManagerSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfPortManagerSpec_To_v1alpha1_NnfPortManagerSpec(in *v1alpha3.NnfPortManagerSpec, out *NnfPortManagerSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfPortManagerSpec_To_v1alpha1_NnfPortManagerSpec(in, out, s) +// Convert_v1alpha4_NnfPortManagerSpec_To_v1alpha1_NnfPortManagerSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfPortManagerSpec_To_v1alpha1_NnfPortManagerSpec(in *v1alpha4.NnfPortManagerSpec, out *NnfPortManagerSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfPortManagerSpec_To_v1alpha1_NnfPortManagerSpec(in, out, s) } -func autoConvert_v1alpha1_NnfPortManagerStatus_To_v1alpha3_NnfPortManagerStatus(in *NnfPortManagerStatus, out *v1alpha3.NnfPortManagerStatus, s conversion.Scope) error { - out.Allocations = *(*[]v1alpha3.NnfPortManagerAllocationStatus)(unsafe.Pointer(&in.Allocations)) - out.Status = v1alpha3.NnfPortManagerStatusStatus(in.Status) +func autoConvert_v1alpha1_NnfPortManagerStatus_To_v1alpha4_NnfPortManagerStatus(in *NnfPortManagerStatus, out *v1alpha4.NnfPortManagerStatus, s conversion.Scope) error { + out.Allocations = *(*[]v1alpha4.NnfPortManagerAllocationStatus)(unsafe.Pointer(&in.Allocations)) + out.Status = v1alpha4.NnfPortManagerStatusStatus(in.Status) return nil } -// Convert_v1alpha1_NnfPortManagerStatus_To_v1alpha3_NnfPortManagerStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfPortManagerStatus_To_v1alpha3_NnfPortManagerStatus(in *NnfPortManagerStatus, out *v1alpha3.NnfPortManagerStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfPortManagerStatus_To_v1alpha3_NnfPortManagerStatus(in, out, s) +// Convert_v1alpha1_NnfPortManagerStatus_To_v1alpha4_NnfPortManagerStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfPortManagerStatus_To_v1alpha4_NnfPortManagerStatus(in *NnfPortManagerStatus, out *v1alpha4.NnfPortManagerStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfPortManagerStatus_To_v1alpha4_NnfPortManagerStatus(in, out, s) } -func autoConvert_v1alpha3_NnfPortManagerStatus_To_v1alpha1_NnfPortManagerStatus(in *v1alpha3.NnfPortManagerStatus, out *NnfPortManagerStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfPortManagerStatus_To_v1alpha1_NnfPortManagerStatus(in *v1alpha4.NnfPortManagerStatus, out *NnfPortManagerStatus, s conversion.Scope) error { out.Allocations = *(*[]NnfPortManagerAllocationStatus)(unsafe.Pointer(&in.Allocations)) out.Status = NnfPortManagerStatusStatus(in.Status) return nil } -// Convert_v1alpha3_NnfPortManagerStatus_To_v1alpha1_NnfPortManagerStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfPortManagerStatus_To_v1alpha1_NnfPortManagerStatus(in *v1alpha3.NnfPortManagerStatus, out *NnfPortManagerStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfPortManagerStatus_To_v1alpha1_NnfPortManagerStatus(in, out, s) +// Convert_v1alpha4_NnfPortManagerStatus_To_v1alpha1_NnfPortManagerStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfPortManagerStatus_To_v1alpha1_NnfPortManagerStatus(in *v1alpha4.NnfPortManagerStatus, out *NnfPortManagerStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfPortManagerStatus_To_v1alpha1_NnfPortManagerStatus(in, out, s) } -func autoConvert_v1alpha1_NnfResourceStatus_To_v1alpha3_NnfResourceStatus(in *NnfResourceStatus, out *v1alpha3.NnfResourceStatus, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfResourceStatus_To_v1alpha4_NnfResourceStatus(in *NnfResourceStatus, out *v1alpha4.NnfResourceStatus, s conversion.Scope) error { out.ID = in.ID out.Name = in.Name - out.Status = v1alpha3.NnfResourceStatusType(in.Status) - out.Health = v1alpha3.NnfResourceHealthType(in.Health) + out.Status = v1alpha4.NnfResourceStatusType(in.Status) + out.Health = v1alpha4.NnfResourceHealthType(in.Health) return nil } -// Convert_v1alpha1_NnfResourceStatus_To_v1alpha3_NnfResourceStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfResourceStatus_To_v1alpha3_NnfResourceStatus(in *NnfResourceStatus, out *v1alpha3.NnfResourceStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfResourceStatus_To_v1alpha3_NnfResourceStatus(in, out, s) +// Convert_v1alpha1_NnfResourceStatus_To_v1alpha4_NnfResourceStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfResourceStatus_To_v1alpha4_NnfResourceStatus(in *NnfResourceStatus, out *v1alpha4.NnfResourceStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfResourceStatus_To_v1alpha4_NnfResourceStatus(in, out, s) } -func autoConvert_v1alpha3_NnfResourceStatus_To_v1alpha1_NnfResourceStatus(in *v1alpha3.NnfResourceStatus, out *NnfResourceStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfResourceStatus_To_v1alpha1_NnfResourceStatus(in *v1alpha4.NnfResourceStatus, out *NnfResourceStatus, s conversion.Scope) error { out.ID = in.ID out.Name = in.Name out.Status = NnfResourceStatusType(in.Status) @@ -2405,111 +2405,111 @@ func autoConvert_v1alpha3_NnfResourceStatus_To_v1alpha1_NnfResourceStatus(in *v1 return nil } -// Convert_v1alpha3_NnfResourceStatus_To_v1alpha1_NnfResourceStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfResourceStatus_To_v1alpha1_NnfResourceStatus(in *v1alpha3.NnfResourceStatus, out *NnfResourceStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfResourceStatus_To_v1alpha1_NnfResourceStatus(in, out, s) +// Convert_v1alpha4_NnfResourceStatus_To_v1alpha1_NnfResourceStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfResourceStatus_To_v1alpha1_NnfResourceStatus(in *v1alpha4.NnfResourceStatus, out *NnfResourceStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfResourceStatus_To_v1alpha1_NnfResourceStatus(in, out, s) } -func autoConvert_v1alpha1_NnfServerStatus_To_v1alpha3_NnfServerStatus(in *NnfServerStatus, out *v1alpha3.NnfServerStatus, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfServerStatus_To_v1alpha4_NnfServerStatus(in *NnfServerStatus, out *v1alpha4.NnfServerStatus, s conversion.Scope) error { out.Hostname = in.Hostname - if err := Convert_v1alpha1_NnfResourceStatus_To_v1alpha3_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { + if err := Convert_v1alpha1_NnfResourceStatus_To_v1alpha4_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { return err } return nil } -// Convert_v1alpha1_NnfServerStatus_To_v1alpha3_NnfServerStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfServerStatus_To_v1alpha3_NnfServerStatus(in *NnfServerStatus, out *v1alpha3.NnfServerStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfServerStatus_To_v1alpha3_NnfServerStatus(in, out, s) +// Convert_v1alpha1_NnfServerStatus_To_v1alpha4_NnfServerStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfServerStatus_To_v1alpha4_NnfServerStatus(in *NnfServerStatus, out *v1alpha4.NnfServerStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfServerStatus_To_v1alpha4_NnfServerStatus(in, out, s) } -func autoConvert_v1alpha3_NnfServerStatus_To_v1alpha1_NnfServerStatus(in *v1alpha3.NnfServerStatus, out *NnfServerStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfServerStatus_To_v1alpha1_NnfServerStatus(in *v1alpha4.NnfServerStatus, out *NnfServerStatus, s conversion.Scope) error { out.Hostname = in.Hostname - if err := Convert_v1alpha3_NnfResourceStatus_To_v1alpha1_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { + if err := Convert_v1alpha4_NnfResourceStatus_To_v1alpha1_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfServerStatus_To_v1alpha1_NnfServerStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfServerStatus_To_v1alpha1_NnfServerStatus(in *v1alpha3.NnfServerStatus, out *NnfServerStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfServerStatus_To_v1alpha1_NnfServerStatus(in, out, s) +// Convert_v1alpha4_NnfServerStatus_To_v1alpha1_NnfServerStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfServerStatus_To_v1alpha1_NnfServerStatus(in *v1alpha4.NnfServerStatus, out *NnfServerStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfServerStatus_To_v1alpha1_NnfServerStatus(in, out, s) } -func autoConvert_v1alpha1_NnfStorage_To_v1alpha3_NnfStorage(in *NnfStorage, out *v1alpha3.NnfStorage, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfStorage_To_v1alpha4_NnfStorage(in *NnfStorage, out *v1alpha4.NnfStorage, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfStorageSpec_To_v1alpha3_NnfStorageSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha1_NnfStorageSpec_To_v1alpha4_NnfStorageSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha1_NnfStorageStatus_To_v1alpha3_NnfStorageStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha1_NnfStorageStatus_To_v1alpha4_NnfStorageStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha1_NnfStorage_To_v1alpha3_NnfStorage is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorage_To_v1alpha3_NnfStorage(in *NnfStorage, out *v1alpha3.NnfStorage, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorage_To_v1alpha3_NnfStorage(in, out, s) +// Convert_v1alpha1_NnfStorage_To_v1alpha4_NnfStorage is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfStorage_To_v1alpha4_NnfStorage(in *NnfStorage, out *v1alpha4.NnfStorage, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorage_To_v1alpha4_NnfStorage(in, out, s) } -func autoConvert_v1alpha3_NnfStorage_To_v1alpha1_NnfStorage(in *v1alpha3.NnfStorage, out *NnfStorage, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorage_To_v1alpha1_NnfStorage(in *v1alpha4.NnfStorage, out *NnfStorage, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfStorageSpec_To_v1alpha1_NnfStorageSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha4_NnfStorageSpec_To_v1alpha1_NnfStorageSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha3_NnfStorageStatus_To_v1alpha1_NnfStorageStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha4_NnfStorageStatus_To_v1alpha1_NnfStorageStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfStorage_To_v1alpha1_NnfStorage is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorage_To_v1alpha1_NnfStorage(in *v1alpha3.NnfStorage, out *NnfStorage, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorage_To_v1alpha1_NnfStorage(in, out, s) +// Convert_v1alpha4_NnfStorage_To_v1alpha1_NnfStorage is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorage_To_v1alpha1_NnfStorage(in *v1alpha4.NnfStorage, out *NnfStorage, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorage_To_v1alpha1_NnfStorage(in, out, s) } -func autoConvert_v1alpha1_NnfStorageAllocationNodes_To_v1alpha3_NnfStorageAllocationNodes(in *NnfStorageAllocationNodes, out *v1alpha3.NnfStorageAllocationNodes, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfStorageAllocationNodes_To_v1alpha4_NnfStorageAllocationNodes(in *NnfStorageAllocationNodes, out *v1alpha4.NnfStorageAllocationNodes, s conversion.Scope) error { out.Name = in.Name out.Count = in.Count return nil } -// Convert_v1alpha1_NnfStorageAllocationNodes_To_v1alpha3_NnfStorageAllocationNodes is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorageAllocationNodes_To_v1alpha3_NnfStorageAllocationNodes(in *NnfStorageAllocationNodes, out *v1alpha3.NnfStorageAllocationNodes, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageAllocationNodes_To_v1alpha3_NnfStorageAllocationNodes(in, out, s) +// Convert_v1alpha1_NnfStorageAllocationNodes_To_v1alpha4_NnfStorageAllocationNodes is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfStorageAllocationNodes_To_v1alpha4_NnfStorageAllocationNodes(in *NnfStorageAllocationNodes, out *v1alpha4.NnfStorageAllocationNodes, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageAllocationNodes_To_v1alpha4_NnfStorageAllocationNodes(in, out, s) } -func autoConvert_v1alpha3_NnfStorageAllocationNodes_To_v1alpha1_NnfStorageAllocationNodes(in *v1alpha3.NnfStorageAllocationNodes, out *NnfStorageAllocationNodes, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageAllocationNodes_To_v1alpha1_NnfStorageAllocationNodes(in *v1alpha4.NnfStorageAllocationNodes, out *NnfStorageAllocationNodes, s conversion.Scope) error { out.Name = in.Name out.Count = in.Count return nil } -// Convert_v1alpha3_NnfStorageAllocationNodes_To_v1alpha1_NnfStorageAllocationNodes is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorageAllocationNodes_To_v1alpha1_NnfStorageAllocationNodes(in *v1alpha3.NnfStorageAllocationNodes, out *NnfStorageAllocationNodes, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageAllocationNodes_To_v1alpha1_NnfStorageAllocationNodes(in, out, s) +// Convert_v1alpha4_NnfStorageAllocationNodes_To_v1alpha1_NnfStorageAllocationNodes is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageAllocationNodes_To_v1alpha1_NnfStorageAllocationNodes(in *v1alpha4.NnfStorageAllocationNodes, out *NnfStorageAllocationNodes, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageAllocationNodes_To_v1alpha1_NnfStorageAllocationNodes(in, out, s) } -func autoConvert_v1alpha1_NnfStorageAllocationSetSpec_To_v1alpha3_NnfStorageAllocationSetSpec(in *NnfStorageAllocationSetSpec, out *v1alpha3.NnfStorageAllocationSetSpec, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfStorageAllocationSetSpec_To_v1alpha4_NnfStorageAllocationSetSpec(in *NnfStorageAllocationSetSpec, out *v1alpha4.NnfStorageAllocationSetSpec, s conversion.Scope) error { out.Name = in.Name out.Capacity = in.Capacity - if err := Convert_v1alpha1_NnfStorageLustreSpec_To_v1alpha3_NnfStorageLustreSpec(&in.NnfStorageLustreSpec, &out.NnfStorageLustreSpec, s); err != nil { + if err := Convert_v1alpha1_NnfStorageLustreSpec_To_v1alpha4_NnfStorageLustreSpec(&in.NnfStorageLustreSpec, &out.NnfStorageLustreSpec, s); err != nil { return err } out.SharedAllocation = in.SharedAllocation - out.Nodes = *(*[]v1alpha3.NnfStorageAllocationNodes)(unsafe.Pointer(&in.Nodes)) + out.Nodes = *(*[]v1alpha4.NnfStorageAllocationNodes)(unsafe.Pointer(&in.Nodes)) return nil } -// Convert_v1alpha1_NnfStorageAllocationSetSpec_To_v1alpha3_NnfStorageAllocationSetSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorageAllocationSetSpec_To_v1alpha3_NnfStorageAllocationSetSpec(in *NnfStorageAllocationSetSpec, out *v1alpha3.NnfStorageAllocationSetSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageAllocationSetSpec_To_v1alpha3_NnfStorageAllocationSetSpec(in, out, s) +// Convert_v1alpha1_NnfStorageAllocationSetSpec_To_v1alpha4_NnfStorageAllocationSetSpec is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfStorageAllocationSetSpec_To_v1alpha4_NnfStorageAllocationSetSpec(in *NnfStorageAllocationSetSpec, out *v1alpha4.NnfStorageAllocationSetSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageAllocationSetSpec_To_v1alpha4_NnfStorageAllocationSetSpec(in, out, s) } -func autoConvert_v1alpha3_NnfStorageAllocationSetSpec_To_v1alpha1_NnfStorageAllocationSetSpec(in *v1alpha3.NnfStorageAllocationSetSpec, out *NnfStorageAllocationSetSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageAllocationSetSpec_To_v1alpha1_NnfStorageAllocationSetSpec(in *v1alpha4.NnfStorageAllocationSetSpec, out *NnfStorageAllocationSetSpec, s conversion.Scope) error { out.Name = in.Name out.Capacity = in.Capacity - if err := Convert_v1alpha3_NnfStorageLustreSpec_To_v1alpha1_NnfStorageLustreSpec(&in.NnfStorageLustreSpec, &out.NnfStorageLustreSpec, s); err != nil { + if err := Convert_v1alpha4_NnfStorageLustreSpec_To_v1alpha1_NnfStorageLustreSpec(&in.NnfStorageLustreSpec, &out.NnfStorageLustreSpec, s); err != nil { return err } out.SharedAllocation = in.SharedAllocation @@ -2517,56 +2517,56 @@ func autoConvert_v1alpha3_NnfStorageAllocationSetSpec_To_v1alpha1_NnfStorageAllo return nil } -// Convert_v1alpha3_NnfStorageAllocationSetSpec_To_v1alpha1_NnfStorageAllocationSetSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorageAllocationSetSpec_To_v1alpha1_NnfStorageAllocationSetSpec(in *v1alpha3.NnfStorageAllocationSetSpec, out *NnfStorageAllocationSetSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageAllocationSetSpec_To_v1alpha1_NnfStorageAllocationSetSpec(in, out, s) +// Convert_v1alpha4_NnfStorageAllocationSetSpec_To_v1alpha1_NnfStorageAllocationSetSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageAllocationSetSpec_To_v1alpha1_NnfStorageAllocationSetSpec(in *v1alpha4.NnfStorageAllocationSetSpec, out *NnfStorageAllocationSetSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageAllocationSetSpec_To_v1alpha1_NnfStorageAllocationSetSpec(in, out, s) } -func autoConvert_v1alpha1_NnfStorageAllocationSetStatus_To_v1alpha3_NnfStorageAllocationSetStatus(in *NnfStorageAllocationSetStatus, out *v1alpha3.NnfStorageAllocationSetStatus, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfStorageAllocationSetStatus_To_v1alpha4_NnfStorageAllocationSetStatus(in *NnfStorageAllocationSetStatus, out *v1alpha4.NnfStorageAllocationSetStatus, s conversion.Scope) error { out.Ready = in.Ready out.AllocationCount = in.AllocationCount return nil } -// Convert_v1alpha1_NnfStorageAllocationSetStatus_To_v1alpha3_NnfStorageAllocationSetStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorageAllocationSetStatus_To_v1alpha3_NnfStorageAllocationSetStatus(in *NnfStorageAllocationSetStatus, out *v1alpha3.NnfStorageAllocationSetStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageAllocationSetStatus_To_v1alpha3_NnfStorageAllocationSetStatus(in, out, s) +// Convert_v1alpha1_NnfStorageAllocationSetStatus_To_v1alpha4_NnfStorageAllocationSetStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfStorageAllocationSetStatus_To_v1alpha4_NnfStorageAllocationSetStatus(in *NnfStorageAllocationSetStatus, out *v1alpha4.NnfStorageAllocationSetStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageAllocationSetStatus_To_v1alpha4_NnfStorageAllocationSetStatus(in, out, s) } -func autoConvert_v1alpha3_NnfStorageAllocationSetStatus_To_v1alpha1_NnfStorageAllocationSetStatus(in *v1alpha3.NnfStorageAllocationSetStatus, out *NnfStorageAllocationSetStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageAllocationSetStatus_To_v1alpha1_NnfStorageAllocationSetStatus(in *v1alpha4.NnfStorageAllocationSetStatus, out *NnfStorageAllocationSetStatus, s conversion.Scope) error { out.Ready = in.Ready out.AllocationCount = in.AllocationCount return nil } -// Convert_v1alpha3_NnfStorageAllocationSetStatus_To_v1alpha1_NnfStorageAllocationSetStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorageAllocationSetStatus_To_v1alpha1_NnfStorageAllocationSetStatus(in *v1alpha3.NnfStorageAllocationSetStatus, out *NnfStorageAllocationSetStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageAllocationSetStatus_To_v1alpha1_NnfStorageAllocationSetStatus(in, out, s) +// Convert_v1alpha4_NnfStorageAllocationSetStatus_To_v1alpha1_NnfStorageAllocationSetStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageAllocationSetStatus_To_v1alpha1_NnfStorageAllocationSetStatus(in *v1alpha4.NnfStorageAllocationSetStatus, out *NnfStorageAllocationSetStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageAllocationSetStatus_To_v1alpha1_NnfStorageAllocationSetStatus(in, out, s) } -func autoConvert_v1alpha1_NnfStorageList_To_v1alpha3_NnfStorageList(in *NnfStorageList, out *v1alpha3.NnfStorageList, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfStorageList_To_v1alpha4_NnfStorageList(in *NnfStorageList, out *v1alpha4.NnfStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha3.NnfStorage)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]v1alpha4.NnfStorage)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha1_NnfStorageList_To_v1alpha3_NnfStorageList is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorageList_To_v1alpha3_NnfStorageList(in *NnfStorageList, out *v1alpha3.NnfStorageList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageList_To_v1alpha3_NnfStorageList(in, out, s) +// Convert_v1alpha1_NnfStorageList_To_v1alpha4_NnfStorageList is an autogenerated conversion function. +func Convert_v1alpha1_NnfStorageList_To_v1alpha4_NnfStorageList(in *NnfStorageList, out *v1alpha4.NnfStorageList, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageList_To_v1alpha4_NnfStorageList(in, out, s) } -func autoConvert_v1alpha3_NnfStorageList_To_v1alpha1_NnfStorageList(in *v1alpha3.NnfStorageList, out *NnfStorageList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageList_To_v1alpha1_NnfStorageList(in *v1alpha4.NnfStorageList, out *NnfStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]NnfStorage)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha3_NnfStorageList_To_v1alpha1_NnfStorageList is an autogenerated conversion function. 
-func Convert_v1alpha3_NnfStorageList_To_v1alpha1_NnfStorageList(in *v1alpha3.NnfStorageList, out *NnfStorageList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageList_To_v1alpha1_NnfStorageList(in, out, s) +// Convert_v1alpha4_NnfStorageList_To_v1alpha1_NnfStorageList is an autogenerated conversion function. +func Convert_v1alpha4_NnfStorageList_To_v1alpha1_NnfStorageList(in *v1alpha4.NnfStorageList, out *NnfStorageList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageList_To_v1alpha1_NnfStorageList(in, out, s) } -func autoConvert_v1alpha1_NnfStorageLustreSpec_To_v1alpha3_NnfStorageLustreSpec(in *NnfStorageLustreSpec, out *v1alpha3.NnfStorageLustreSpec, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfStorageLustreSpec_To_v1alpha4_NnfStorageLustreSpec(in *NnfStorageLustreSpec, out *v1alpha4.NnfStorageLustreSpec, s conversion.Scope) error { out.TargetType = in.TargetType out.BackFs = in.BackFs out.MgsAddress = in.MgsAddress @@ -2574,12 +2574,12 @@ func autoConvert_v1alpha1_NnfStorageLustreSpec_To_v1alpha3_NnfStorageLustreSpec( return nil } -// Convert_v1alpha1_NnfStorageLustreSpec_To_v1alpha3_NnfStorageLustreSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorageLustreSpec_To_v1alpha3_NnfStorageLustreSpec(in *NnfStorageLustreSpec, out *v1alpha3.NnfStorageLustreSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageLustreSpec_To_v1alpha3_NnfStorageLustreSpec(in, out, s) +// Convert_v1alpha1_NnfStorageLustreSpec_To_v1alpha4_NnfStorageLustreSpec is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfStorageLustreSpec_To_v1alpha4_NnfStorageLustreSpec(in *NnfStorageLustreSpec, out *v1alpha4.NnfStorageLustreSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageLustreSpec_To_v1alpha4_NnfStorageLustreSpec(in, out, s) } -func autoConvert_v1alpha3_NnfStorageLustreSpec_To_v1alpha1_NnfStorageLustreSpec(in *v1alpha3.NnfStorageLustreSpec, out *NnfStorageLustreSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageLustreSpec_To_v1alpha1_NnfStorageLustreSpec(in *v1alpha4.NnfStorageLustreSpec, out *NnfStorageLustreSpec, s conversion.Scope) error { out.TargetType = in.TargetType out.BackFs = in.BackFs out.MgsAddress = in.MgsAddress @@ -2587,73 +2587,73 @@ func autoConvert_v1alpha3_NnfStorageLustreSpec_To_v1alpha1_NnfStorageLustreSpec( return nil } -// Convert_v1alpha3_NnfStorageLustreSpec_To_v1alpha1_NnfStorageLustreSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorageLustreSpec_To_v1alpha1_NnfStorageLustreSpec(in *v1alpha3.NnfStorageLustreSpec, out *NnfStorageLustreSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageLustreSpec_To_v1alpha1_NnfStorageLustreSpec(in, out, s) +// Convert_v1alpha4_NnfStorageLustreSpec_To_v1alpha1_NnfStorageLustreSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageLustreSpec_To_v1alpha1_NnfStorageLustreSpec(in *v1alpha4.NnfStorageLustreSpec, out *NnfStorageLustreSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageLustreSpec_To_v1alpha1_NnfStorageLustreSpec(in, out, s) } -func autoConvert_v1alpha1_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreStatus(in *NnfStorageLustreStatus, out *v1alpha3.NnfStorageLustreStatus, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfStorageLustreStatus_To_v1alpha4_NnfStorageLustreStatus(in *NnfStorageLustreStatus, out *v1alpha4.NnfStorageLustreStatus, s conversion.Scope) error { out.MgsAddress = in.MgsAddress out.FileSystemName = in.FileSystemName out.LustreMgtReference = in.LustreMgtReference return nil } -// Convert_v1alpha1_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreStatus(in *NnfStorageLustreStatus, out *v1alpha3.NnfStorageLustreStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreStatus(in, out, s) +// Convert_v1alpha1_NnfStorageLustreStatus_To_v1alpha4_NnfStorageLustreStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfStorageLustreStatus_To_v1alpha4_NnfStorageLustreStatus(in *NnfStorageLustreStatus, out *v1alpha4.NnfStorageLustreStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageLustreStatus_To_v1alpha4_NnfStorageLustreStatus(in, out, s) } -func autoConvert_v1alpha3_NnfStorageLustreStatus_To_v1alpha1_NnfStorageLustreStatus(in *v1alpha3.NnfStorageLustreStatus, out *NnfStorageLustreStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageLustreStatus_To_v1alpha1_NnfStorageLustreStatus(in *v1alpha4.NnfStorageLustreStatus, out *NnfStorageLustreStatus, s conversion.Scope) error { out.MgsAddress = in.MgsAddress out.FileSystemName = in.FileSystemName out.LustreMgtReference = in.LustreMgtReference return nil } -// Convert_v1alpha3_NnfStorageLustreStatus_To_v1alpha1_NnfStorageLustreStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorageLustreStatus_To_v1alpha1_NnfStorageLustreStatus(in *v1alpha3.NnfStorageLustreStatus, out *NnfStorageLustreStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageLustreStatus_To_v1alpha1_NnfStorageLustreStatus(in, out, s) +// Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha1_NnfStorageLustreStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha1_NnfStorageLustreStatus(in *v1alpha4.NnfStorageLustreStatus, out *NnfStorageLustreStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageLustreStatus_To_v1alpha1_NnfStorageLustreStatus(in, out, s) } -func autoConvert_v1alpha1_NnfStorageProfile_To_v1alpha3_NnfStorageProfile(in *NnfStorageProfile, out *v1alpha3.NnfStorageProfile, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(in *NnfStorageProfile, out *v1alpha4.NnfStorageProfile, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfStorageProfileData_To_v1alpha3_NnfStorageProfileData(&in.Data, &out.Data, s); err != nil { + if err := Convert_v1alpha1_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData(&in.Data, &out.Data, s); err != nil { return err } return nil } -// Convert_v1alpha1_NnfStorageProfile_To_v1alpha3_NnfStorageProfile is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorageProfile_To_v1alpha3_NnfStorageProfile(in *NnfStorageProfile, out *v1alpha3.NnfStorageProfile, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageProfile_To_v1alpha3_NnfStorageProfile(in, out, s) +// Convert_v1alpha1_NnfStorageProfile_To_v1alpha4_NnfStorageProfile is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(in *NnfStorageProfile, out *v1alpha4.NnfStorageProfile, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(in, out, s) } -func autoConvert_v1alpha3_NnfStorageProfile_To_v1alpha1_NnfStorageProfile(in *v1alpha3.NnfStorageProfile, out *NnfStorageProfile, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageProfile_To_v1alpha1_NnfStorageProfile(in *v1alpha4.NnfStorageProfile, out *NnfStorageProfile, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfStorageProfileData_To_v1alpha1_NnfStorageProfileData(&in.Data, &out.Data, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileData_To_v1alpha1_NnfStorageProfileData(&in.Data, &out.Data, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfStorageProfile_To_v1alpha1_NnfStorageProfile is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorageProfile_To_v1alpha1_NnfStorageProfile(in *v1alpha3.NnfStorageProfile, out *NnfStorageProfile, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageProfile_To_v1alpha1_NnfStorageProfile(in, out, s) +// Convert_v1alpha4_NnfStorageProfile_To_v1alpha1_NnfStorageProfile is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageProfile_To_v1alpha1_NnfStorageProfile(in *v1alpha4.NnfStorageProfile, out *NnfStorageProfile, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfile_To_v1alpha1_NnfStorageProfile(in, out, s) } -func autoConvert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(in *NnfStorageProfileCmdLines, out *v1alpha3.NnfStorageProfileCmdLines, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(in *NnfStorageProfileCmdLines, out *v1alpha4.NnfStorageProfileCmdLines, s conversion.Scope) error { out.Mkfs = in.Mkfs out.SharedVg = in.SharedVg out.PvCreate = in.PvCreate out.PvRemove = in.PvRemove out.VgCreate = in.VgCreate - if err := Convert_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines(&in.VgChange, &out.VgChange, s); err != nil { + if err := Convert_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines(&in.VgChange, &out.VgChange, s); err != nil { return err } out.VgRemove = in.VgRemove out.LvCreate = in.LvCreate - if err := Convert_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines(&in.LvChange, &out.LvChange, s); err != nil { + if err := Convert_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines(&in.LvChange, &out.LvChange, s); err != nil { return err } out.LvRemove = in.LvRemove @@ -2662,23 +2662,23 @@ func autoConvert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfil return nil } -// Convert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(in *NnfStorageProfileCmdLines, out *v1alpha3.NnfStorageProfileCmdLines, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(in, out, s) +// Convert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines is an autogenerated conversion function. +func Convert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(in *NnfStorageProfileCmdLines, out *v1alpha4.NnfStorageProfileCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(in, out, s) } -func autoConvert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(in *v1alpha3.NnfStorageProfileCmdLines, out *NnfStorageProfileCmdLines, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(in *v1alpha4.NnfStorageProfileCmdLines, out *NnfStorageProfileCmdLines, s conversion.Scope) error { out.Mkfs = in.Mkfs out.SharedVg = in.SharedVg out.PvCreate = in.PvCreate out.PvRemove = in.PvRemove out.VgCreate = in.VgCreate - if err := Convert_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines(&in.VgChange, &out.VgChange, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines(&in.VgChange, &out.VgChange, s); err != nil { return err } out.VgRemove = in.VgRemove out.LvCreate = in.LvCreate - if err := Convert_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines(&in.LvChange, &out.LvChange, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines(&in.LvChange, &out.LvChange, s); err != nil { return err } out.LvRemove = in.LvRemove @@ -2689,54 +2689,54 @@ 
func autoConvert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfil return nil } -func autoConvert_v1alpha1_NnfStorageProfileData_To_v1alpha3_NnfStorageProfileData(in *NnfStorageProfileData, out *v1alpha3.NnfStorageProfileData, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData(in *NnfStorageProfileData, out *v1alpha4.NnfStorageProfileData, s conversion.Scope) error { out.Default = in.Default out.Pinned = in.Pinned - if err := Convert_v1alpha1_NnfStorageProfileLustreData_To_v1alpha3_NnfStorageProfileLustreData(&in.LustreStorage, &out.LustreStorage, s); err != nil { + if err := Convert_v1alpha1_NnfStorageProfileLustreData_To_v1alpha4_NnfStorageProfileLustreData(&in.LustreStorage, &out.LustreStorage, s); err != nil { return err } - if err := Convert_v1alpha1_NnfStorageProfileGFS2Data_To_v1alpha3_NnfStorageProfileGFS2Data(&in.GFS2Storage, &out.GFS2Storage, s); err != nil { + if err := Convert_v1alpha1_NnfStorageProfileGFS2Data_To_v1alpha4_NnfStorageProfileGFS2Data(&in.GFS2Storage, &out.GFS2Storage, s); err != nil { return err } - if err := Convert_v1alpha1_NnfStorageProfileXFSData_To_v1alpha3_NnfStorageProfileXFSData(&in.XFSStorage, &out.XFSStorage, s); err != nil { + if err := Convert_v1alpha1_NnfStorageProfileXFSData_To_v1alpha4_NnfStorageProfileXFSData(&in.XFSStorage, &out.XFSStorage, s); err != nil { return err } - if err := Convert_v1alpha1_NnfStorageProfileRawData_To_v1alpha3_NnfStorageProfileRawData(&in.RawStorage, &out.RawStorage, s); err != nil { + if err := Convert_v1alpha1_NnfStorageProfileRawData_To_v1alpha4_NnfStorageProfileRawData(&in.RawStorage, &out.RawStorage, s); err != nil { return err } return nil } -// Convert_v1alpha1_NnfStorageProfileData_To_v1alpha3_NnfStorageProfileData is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfStorageProfileData_To_v1alpha3_NnfStorageProfileData(in *NnfStorageProfileData, out *v1alpha3.NnfStorageProfileData, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageProfileData_To_v1alpha3_NnfStorageProfileData(in, out, s) +// Convert_v1alpha1_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData is an autogenerated conversion function. +func Convert_v1alpha1_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData(in *NnfStorageProfileData, out *v1alpha4.NnfStorageProfileData, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData(in, out, s) } -func autoConvert_v1alpha3_NnfStorageProfileData_To_v1alpha1_NnfStorageProfileData(in *v1alpha3.NnfStorageProfileData, out *NnfStorageProfileData, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageProfileData_To_v1alpha1_NnfStorageProfileData(in *v1alpha4.NnfStorageProfileData, out *NnfStorageProfileData, s conversion.Scope) error { out.Default = in.Default out.Pinned = in.Pinned - if err := Convert_v1alpha3_NnfStorageProfileLustreData_To_v1alpha1_NnfStorageProfileLustreData(&in.LustreStorage, &out.LustreStorage, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileLustreData_To_v1alpha1_NnfStorageProfileLustreData(&in.LustreStorage, &out.LustreStorage, s); err != nil { return err } - if err := Convert_v1alpha3_NnfStorageProfileGFS2Data_To_v1alpha1_NnfStorageProfileGFS2Data(&in.GFS2Storage, &out.GFS2Storage, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileGFS2Data_To_v1alpha1_NnfStorageProfileGFS2Data(&in.GFS2Storage, &out.GFS2Storage, s); err != nil { return err } - if err := Convert_v1alpha3_NnfStorageProfileXFSData_To_v1alpha1_NnfStorageProfileXFSData(&in.XFSStorage, &out.XFSStorage, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileXFSData_To_v1alpha1_NnfStorageProfileXFSData(&in.XFSStorage, &out.XFSStorage, s); err != nil { return err } - if err := 
Convert_v1alpha3_NnfStorageProfileRawData_To_v1alpha1_NnfStorageProfileRawData(&in.RawStorage, &out.RawStorage, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileRawData_To_v1alpha1_NnfStorageProfileRawData(&in.RawStorage, &out.RawStorage, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfStorageProfileData_To_v1alpha1_NnfStorageProfileData is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorageProfileData_To_v1alpha1_NnfStorageProfileData(in *v1alpha3.NnfStorageProfileData, out *NnfStorageProfileData, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageProfileData_To_v1alpha1_NnfStorageProfileData(in, out, s) +// Convert_v1alpha4_NnfStorageProfileData_To_v1alpha1_NnfStorageProfileData is an autogenerated conversion function. +func Convert_v1alpha4_NnfStorageProfileData_To_v1alpha1_NnfStorageProfileData(in *v1alpha4.NnfStorageProfileData, out *NnfStorageProfileData, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileData_To_v1alpha1_NnfStorageProfileData(in, out, s) } -func autoConvert_v1alpha1_NnfStorageProfileGFS2Data_To_v1alpha3_NnfStorageProfileGFS2Data(in *NnfStorageProfileGFS2Data, out *v1alpha3.NnfStorageProfileGFS2Data, s conversion.Scope) error { - if err := Convert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { +func autoConvert_v1alpha1_NnfStorageProfileGFS2Data_To_v1alpha4_NnfStorageProfileGFS2Data(in *NnfStorageProfileGFS2Data, out *v1alpha4.NnfStorageProfileGFS2Data, s conversion.Scope) error { + if err := Convert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { return err } out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) @@ -2744,13 +2744,13 @@ func autoConvert_v1alpha1_NnfStorageProfileGFS2Data_To_v1alpha3_NnfStorageProfil return nil } -// 
Convert_v1alpha1_NnfStorageProfileGFS2Data_To_v1alpha3_NnfStorageProfileGFS2Data is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorageProfileGFS2Data_To_v1alpha3_NnfStorageProfileGFS2Data(in *NnfStorageProfileGFS2Data, out *v1alpha3.NnfStorageProfileGFS2Data, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageProfileGFS2Data_To_v1alpha3_NnfStorageProfileGFS2Data(in, out, s) +// Convert_v1alpha1_NnfStorageProfileGFS2Data_To_v1alpha4_NnfStorageProfileGFS2Data is an autogenerated conversion function. +func Convert_v1alpha1_NnfStorageProfileGFS2Data_To_v1alpha4_NnfStorageProfileGFS2Data(in *NnfStorageProfileGFS2Data, out *v1alpha4.NnfStorageProfileGFS2Data, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageProfileGFS2Data_To_v1alpha4_NnfStorageProfileGFS2Data(in, out, s) } -func autoConvert_v1alpha3_NnfStorageProfileGFS2Data_To_v1alpha1_NnfStorageProfileGFS2Data(in *v1alpha3.NnfStorageProfileGFS2Data, out *NnfStorageProfileGFS2Data, s conversion.Scope) error { - if err := Convert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { +func autoConvert_v1alpha4_NnfStorageProfileGFS2Data_To_v1alpha1_NnfStorageProfileGFS2Data(in *v1alpha4.NnfStorageProfileGFS2Data, out *NnfStorageProfileGFS2Data, s conversion.Scope) error { + if err := Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { return err } out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) @@ -2758,62 +2758,62 @@ func autoConvert_v1alpha3_NnfStorageProfileGFS2Data_To_v1alpha1_NnfStorageProfil return nil } -// Convert_v1alpha3_NnfStorageProfileGFS2Data_To_v1alpha1_NnfStorageProfileGFS2Data is an autogenerated conversion function. 
-func Convert_v1alpha3_NnfStorageProfileGFS2Data_To_v1alpha1_NnfStorageProfileGFS2Data(in *v1alpha3.NnfStorageProfileGFS2Data, out *NnfStorageProfileGFS2Data, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageProfileGFS2Data_To_v1alpha1_NnfStorageProfileGFS2Data(in, out, s) +// Convert_v1alpha4_NnfStorageProfileGFS2Data_To_v1alpha1_NnfStorageProfileGFS2Data is an autogenerated conversion function. +func Convert_v1alpha4_NnfStorageProfileGFS2Data_To_v1alpha1_NnfStorageProfileGFS2Data(in *v1alpha4.NnfStorageProfileGFS2Data, out *NnfStorageProfileGFS2Data, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileGFS2Data_To_v1alpha1_NnfStorageProfileGFS2Data(in, out, s) } -func autoConvert_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines(in *NnfStorageProfileLVMLvChangeCmdLines, out *v1alpha3.NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines(in *NnfStorageProfileLVMLvChangeCmdLines, out *v1alpha4.NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { out.Activate = in.Activate out.Deactivate = in.Deactivate return nil } -// Convert_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines(in *NnfStorageProfileLVMLvChangeCmdLines, out *v1alpha3.NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines(in, out, s) +// Convert_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines(in *NnfStorageProfileLVMLvChangeCmdLines, out *v1alpha4.NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines(in, out, s) } -func autoConvert_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines(in *v1alpha3.NnfStorageProfileLVMLvChangeCmdLines, out *NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines(in *v1alpha4.NnfStorageProfileLVMLvChangeCmdLines, out *NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { out.Activate = in.Activate out.Deactivate = in.Deactivate return nil } -// Convert_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines(in *v1alpha3.NnfStorageProfileLVMLvChangeCmdLines, out *NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines(in, out, s) +// Convert_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines(in *v1alpha4.NnfStorageProfileLVMLvChangeCmdLines, out *NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines(in, out, s) } -func autoConvert_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines(in *NnfStorageProfileLVMVgChangeCmdLines, out *v1alpha3.NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines(in *NnfStorageProfileLVMVgChangeCmdLines, out *v1alpha4.NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { out.LockStart = in.LockStart out.LockStop = in.LockStop return nil } -// Convert_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines(in *NnfStorageProfileLVMVgChangeCmdLines, out *v1alpha3.NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines(in, out, s) +// Convert_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines(in *NnfStorageProfileLVMVgChangeCmdLines, out *v1alpha4.NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines(in, out, s) } -func autoConvert_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines(in *v1alpha3.NnfStorageProfileLVMVgChangeCmdLines, out *NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines(in *v1alpha4.NnfStorageProfileLVMVgChangeCmdLines, out *NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { out.LockStart = in.LockStart out.LockStop = in.LockStop return nil } -// Convert_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines(in *v1alpha3.NnfStorageProfileLVMVgChangeCmdLines, out *NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines(in, out, s) +// Convert_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines(in *v1alpha4.NnfStorageProfileLVMVgChangeCmdLines, out *NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines(in, out, s) } -func autoConvert_v1alpha1_NnfStorageProfileList_To_v1alpha3_NnfStorageProfileList(in *NnfStorageProfileList, out *v1alpha3.NnfStorageProfileList, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfStorageProfileList_To_v1alpha4_NnfStorageProfileList(in *NnfStorageProfileList, out *v1alpha4.NnfStorageProfileList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]v1alpha3.NnfStorageProfile, len(*in)) + *out = make([]v1alpha4.NnfStorageProfile, len(*in)) for i := range *in { - if err := Convert_v1alpha1_NnfStorageProfile_To_v1alpha3_NnfStorageProfile(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1alpha1_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -2823,18 +2823,18 @@ func autoConvert_v1alpha1_NnfStorageProfileList_To_v1alpha3_NnfStorageProfileLis return nil } -// Convert_v1alpha1_NnfStorageProfileList_To_v1alpha3_NnfStorageProfileList is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorageProfileList_To_v1alpha3_NnfStorageProfileList(in *NnfStorageProfileList, out *v1alpha3.NnfStorageProfileList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageProfileList_To_v1alpha3_NnfStorageProfileList(in, out, s) +// Convert_v1alpha1_NnfStorageProfileList_To_v1alpha4_NnfStorageProfileList is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfStorageProfileList_To_v1alpha4_NnfStorageProfileList(in *NnfStorageProfileList, out *v1alpha4.NnfStorageProfileList, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageProfileList_To_v1alpha4_NnfStorageProfileList(in, out, s) } -func autoConvert_v1alpha3_NnfStorageProfileList_To_v1alpha1_NnfStorageProfileList(in *v1alpha3.NnfStorageProfileList, out *NnfStorageProfileList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageProfileList_To_v1alpha1_NnfStorageProfileList(in *v1alpha4.NnfStorageProfileList, out *NnfStorageProfileList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]NnfStorageProfile, len(*in)) for i := range *in { - if err := Convert_v1alpha3_NnfStorageProfile_To_v1alpha1_NnfStorageProfile(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfile_To_v1alpha1_NnfStorageProfile(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -2844,24 +2844,24 @@ func autoConvert_v1alpha3_NnfStorageProfileList_To_v1alpha1_NnfStorageProfileLis return nil } -// Convert_v1alpha3_NnfStorageProfileList_To_v1alpha1_NnfStorageProfileList is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorageProfileList_To_v1alpha1_NnfStorageProfileList(in *v1alpha3.NnfStorageProfileList, out *NnfStorageProfileList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageProfileList_To_v1alpha1_NnfStorageProfileList(in, out, s) +// Convert_v1alpha4_NnfStorageProfileList_To_v1alpha1_NnfStorageProfileList is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageProfileList_To_v1alpha1_NnfStorageProfileList(in *v1alpha4.NnfStorageProfileList, out *NnfStorageProfileList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileList_To_v1alpha1_NnfStorageProfileList(in, out, s) } -func autoConvert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(in *NnfStorageProfileLustreCmdLines, out *v1alpha3.NnfStorageProfileLustreCmdLines, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(in *NnfStorageProfileLustreCmdLines, out *v1alpha4.NnfStorageProfileLustreCmdLines, s conversion.Scope) error { out.ZpoolCreate = in.ZpoolCreate out.Mkfs = in.Mkfs out.MountTarget = in.MountTarget return nil } -// Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(in *NnfStorageProfileLustreCmdLines, out *v1alpha3.NnfStorageProfileLustreCmdLines, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(in, out, s) +// Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(in *NnfStorageProfileLustreCmdLines, out *v1alpha4.NnfStorageProfileLustreCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(in, out, s) } -func autoConvert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(in *v1alpha3.NnfStorageProfileLustreCmdLines, out *NnfStorageProfileLustreCmdLines, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(in *v1alpha4.NnfStorageProfileLustreCmdLines, out *NnfStorageProfileLustreCmdLines, s conversion.Scope) error { out.ZpoolCreate = in.ZpoolCreate out.Mkfs = in.Mkfs out.MountTarget = in.MountTarget @@ -2870,7 +2870,7 @@ func autoConvert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorage return nil } -func autoConvert_v1alpha1_NnfStorageProfileLustreData_To_v1alpha3_NnfStorageProfileLustreData(in *NnfStorageProfileLustreData, out *v1alpha3.NnfStorageProfileLustreData, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfStorageProfileLustreData_To_v1alpha4_NnfStorageProfileLustreData(in *NnfStorageProfileLustreData, out *v1alpha4.NnfStorageProfileLustreData, s conversion.Scope) error { out.CombinedMGTMDT = in.CombinedMGTMDT out.ExternalMGS = in.ExternalMGS out.CapacityMGT = in.CapacityMGT @@ -2878,28 +2878,28 @@ func autoConvert_v1alpha1_NnfStorageProfileLustreData_To_v1alpha3_NnfStorageProf out.ExclusiveMDT = in.ExclusiveMDT out.CapacityScalingFactor = in.CapacityScalingFactor out.StandaloneMGTPoolName = in.StandaloneMGTPoolName - if err := Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(&in.MgtCmdLines, &out.MgtCmdLines, s); err != nil { + if err := Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(&in.MgtCmdLines, 
&out.MgtCmdLines, s); err != nil { return err } - if err := Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(&in.MdtCmdLines, &out.MdtCmdLines, s); err != nil { + if err := Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(&in.MdtCmdLines, &out.MdtCmdLines, s); err != nil { return err } - if err := Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(&in.MgtMdtCmdLines, &out.MgtMdtCmdLines, s); err != nil { + if err := Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(&in.MgtMdtCmdLines, &out.MgtMdtCmdLines, s); err != nil { return err } - if err := Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(&in.OstCmdLines, &out.OstCmdLines, s); err != nil { + if err := Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(&in.OstCmdLines, &out.OstCmdLines, s); err != nil { return err } - if err := Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions(&in.MgtOptions, &out.MgtOptions, s); err != nil { + if err := Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(&in.MgtOptions, &out.MgtOptions, s); err != nil { return err } - if err := Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions(&in.MdtOptions, &out.MdtOptions, s); err != nil { + if err := Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(&in.MdtOptions, &out.MdtOptions, s); err != nil { return err } - if err := Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions(&in.MgtMdtOptions, &out.MgtMdtOptions, s); err != nil { + if err := 
Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(&in.MgtMdtOptions, &out.MgtMdtOptions, s); err != nil { return err } - if err := Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions(&in.OstOptions, &out.OstOptions, s); err != nil { + if err := Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(&in.OstOptions, &out.OstOptions, s); err != nil { return err } out.MountRabbit = in.MountRabbit @@ -2907,12 +2907,12 @@ func autoConvert_v1alpha1_NnfStorageProfileLustreData_To_v1alpha3_NnfStorageProf return nil } -// Convert_v1alpha1_NnfStorageProfileLustreData_To_v1alpha3_NnfStorageProfileLustreData is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorageProfileLustreData_To_v1alpha3_NnfStorageProfileLustreData(in *NnfStorageProfileLustreData, out *v1alpha3.NnfStorageProfileLustreData, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageProfileLustreData_To_v1alpha3_NnfStorageProfileLustreData(in, out, s) +// Convert_v1alpha1_NnfStorageProfileLustreData_To_v1alpha4_NnfStorageProfileLustreData is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfStorageProfileLustreData_To_v1alpha4_NnfStorageProfileLustreData(in *NnfStorageProfileLustreData, out *v1alpha4.NnfStorageProfileLustreData, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageProfileLustreData_To_v1alpha4_NnfStorageProfileLustreData(in, out, s) } -func autoConvert_v1alpha3_NnfStorageProfileLustreData_To_v1alpha1_NnfStorageProfileLustreData(in *v1alpha3.NnfStorageProfileLustreData, out *NnfStorageProfileLustreData, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageProfileLustreData_To_v1alpha1_NnfStorageProfileLustreData(in *v1alpha4.NnfStorageProfileLustreData, out *NnfStorageProfileLustreData, s conversion.Scope) error { out.CombinedMGTMDT = in.CombinedMGTMDT out.ExternalMGS = in.ExternalMGS out.CapacityMGT = in.CapacityMGT @@ -2920,28 +2920,28 @@ func autoConvert_v1alpha3_NnfStorageProfileLustreData_To_v1alpha1_NnfStorageProf out.ExclusiveMDT = in.ExclusiveMDT out.CapacityScalingFactor = in.CapacityScalingFactor out.StandaloneMGTPoolName = in.StandaloneMGTPoolName - if err := Convert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(&in.MgtCmdLines, &out.MgtCmdLines, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(&in.MgtCmdLines, &out.MgtCmdLines, s); err != nil { return err } - if err := Convert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(&in.MdtCmdLines, &out.MdtCmdLines, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(&in.MdtCmdLines, &out.MdtCmdLines, s); err != nil { return err } - if err := Convert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(&in.MgtMdtCmdLines, &out.MgtMdtCmdLines, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(&in.MgtMdtCmdLines, 
&out.MgtMdtCmdLines, s); err != nil { return err } - if err := Convert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(&in.OstCmdLines, &out.OstCmdLines, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(&in.OstCmdLines, &out.OstCmdLines, s); err != nil { return err } - if err := Convert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(&in.MgtOptions, &out.MgtOptions, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(&in.MgtOptions, &out.MgtOptions, s); err != nil { return err } - if err := Convert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(&in.MdtOptions, &out.MdtOptions, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(&in.MdtOptions, &out.MdtOptions, s); err != nil { return err } - if err := Convert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(&in.MgtMdtOptions, &out.MgtMdtOptions, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(&in.MgtMdtOptions, &out.MgtMdtOptions, s); err != nil { return err } - if err := Convert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(&in.OstOptions, &out.OstOptions, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(&in.OstOptions, &out.OstOptions, s); err != nil { return err } out.MountRabbit = in.MountRabbit @@ -2949,12 +2949,12 @@ func autoConvert_v1alpha3_NnfStorageProfileLustreData_To_v1alpha1_NnfStorageProf return nil } -// Convert_v1alpha3_NnfStorageProfileLustreData_To_v1alpha1_NnfStorageProfileLustreData is an autogenerated 
conversion function. -func Convert_v1alpha3_NnfStorageProfileLustreData_To_v1alpha1_NnfStorageProfileLustreData(in *v1alpha3.NnfStorageProfileLustreData, out *NnfStorageProfileLustreData, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageProfileLustreData_To_v1alpha1_NnfStorageProfileLustreData(in, out, s) +// Convert_v1alpha4_NnfStorageProfileLustreData_To_v1alpha1_NnfStorageProfileLustreData is an autogenerated conversion function. +func Convert_v1alpha4_NnfStorageProfileLustreData_To_v1alpha1_NnfStorageProfileLustreData(in *v1alpha4.NnfStorageProfileLustreData, out *NnfStorageProfileLustreData, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileLustreData_To_v1alpha1_NnfStorageProfileLustreData(in, out, s) } -func autoConvert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions(in *NnfStorageProfileLustreMiscOptions, out *v1alpha3.NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(in *NnfStorageProfileLustreMiscOptions, out *v1alpha4.NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { out.ColocateComputes = in.ColocateComputes out.Count = in.Count out.Scale = in.Scale @@ -2962,12 +2962,12 @@ func autoConvert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStor return nil } -// Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions(in *NnfStorageProfileLustreMiscOptions, out *v1alpha3.NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions(in, out, s) +// Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions is an autogenerated conversion function. +func Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(in *NnfStorageProfileLustreMiscOptions, out *v1alpha4.NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(in, out, s) } -func autoConvert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(in *v1alpha3.NnfStorageProfileLustreMiscOptions, out *NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(in *v1alpha4.NnfStorageProfileLustreMiscOptions, out *NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { out.ColocateComputes = in.ColocateComputes out.Count = in.Count out.Scale = in.Scale @@ -2975,13 +2975,13 @@ func autoConvert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStor return nil } -// Convert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions is an autogenerated conversion function. 
-func Convert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(in *v1alpha3.NnfStorageProfileLustreMiscOptions, out *NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(in, out, s) +// Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions is an autogenerated conversion function. +func Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(in *v1alpha4.NnfStorageProfileLustreMiscOptions, out *NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(in, out, s) } -func autoConvert_v1alpha1_NnfStorageProfileRawData_To_v1alpha3_NnfStorageProfileRawData(in *NnfStorageProfileRawData, out *v1alpha3.NnfStorageProfileRawData, s conversion.Scope) error { - if err := Convert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { +func autoConvert_v1alpha1_NnfStorageProfileRawData_To_v1alpha4_NnfStorageProfileRawData(in *NnfStorageProfileRawData, out *v1alpha4.NnfStorageProfileRawData, s conversion.Scope) error { + if err := Convert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { return err } out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) @@ -2989,13 +2989,13 @@ func autoConvert_v1alpha1_NnfStorageProfileRawData_To_v1alpha3_NnfStorageProfile return nil } -// Convert_v1alpha1_NnfStorageProfileRawData_To_v1alpha3_NnfStorageProfileRawData is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfStorageProfileRawData_To_v1alpha3_NnfStorageProfileRawData(in *NnfStorageProfileRawData, out *v1alpha3.NnfStorageProfileRawData, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageProfileRawData_To_v1alpha3_NnfStorageProfileRawData(in, out, s) +// Convert_v1alpha1_NnfStorageProfileRawData_To_v1alpha4_NnfStorageProfileRawData is an autogenerated conversion function. +func Convert_v1alpha1_NnfStorageProfileRawData_To_v1alpha4_NnfStorageProfileRawData(in *NnfStorageProfileRawData, out *v1alpha4.NnfStorageProfileRawData, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageProfileRawData_To_v1alpha4_NnfStorageProfileRawData(in, out, s) } -func autoConvert_v1alpha3_NnfStorageProfileRawData_To_v1alpha1_NnfStorageProfileRawData(in *v1alpha3.NnfStorageProfileRawData, out *NnfStorageProfileRawData, s conversion.Scope) error { - if err := Convert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { +func autoConvert_v1alpha4_NnfStorageProfileRawData_To_v1alpha1_NnfStorageProfileRawData(in *v1alpha4.NnfStorageProfileRawData, out *NnfStorageProfileRawData, s conversion.Scope) error { + if err := Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { return err } out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) @@ -3003,13 +3003,13 @@ func autoConvert_v1alpha3_NnfStorageProfileRawData_To_v1alpha1_NnfStorageProfile return nil } -// Convert_v1alpha3_NnfStorageProfileRawData_To_v1alpha1_NnfStorageProfileRawData is an autogenerated conversion function. 
-func Convert_v1alpha3_NnfStorageProfileRawData_To_v1alpha1_NnfStorageProfileRawData(in *v1alpha3.NnfStorageProfileRawData, out *NnfStorageProfileRawData, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageProfileRawData_To_v1alpha1_NnfStorageProfileRawData(in, out, s) +// Convert_v1alpha4_NnfStorageProfileRawData_To_v1alpha1_NnfStorageProfileRawData is an autogenerated conversion function. +func Convert_v1alpha4_NnfStorageProfileRawData_To_v1alpha1_NnfStorageProfileRawData(in *v1alpha4.NnfStorageProfileRawData, out *NnfStorageProfileRawData, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileRawData_To_v1alpha1_NnfStorageProfileRawData(in, out, s) } -func autoConvert_v1alpha1_NnfStorageProfileXFSData_To_v1alpha3_NnfStorageProfileXFSData(in *NnfStorageProfileXFSData, out *v1alpha3.NnfStorageProfileXFSData, s conversion.Scope) error { - if err := Convert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { +func autoConvert_v1alpha1_NnfStorageProfileXFSData_To_v1alpha4_NnfStorageProfileXFSData(in *NnfStorageProfileXFSData, out *v1alpha4.NnfStorageProfileXFSData, s conversion.Scope) error { + if err := Convert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { return err } out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) @@ -3017,13 +3017,13 @@ func autoConvert_v1alpha1_NnfStorageProfileXFSData_To_v1alpha3_NnfStorageProfile return nil } -// Convert_v1alpha1_NnfStorageProfileXFSData_To_v1alpha3_NnfStorageProfileXFSData is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfStorageProfileXFSData_To_v1alpha3_NnfStorageProfileXFSData(in *NnfStorageProfileXFSData, out *v1alpha3.NnfStorageProfileXFSData, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageProfileXFSData_To_v1alpha3_NnfStorageProfileXFSData(in, out, s) +// Convert_v1alpha1_NnfStorageProfileXFSData_To_v1alpha4_NnfStorageProfileXFSData is an autogenerated conversion function. +func Convert_v1alpha1_NnfStorageProfileXFSData_To_v1alpha4_NnfStorageProfileXFSData(in *NnfStorageProfileXFSData, out *v1alpha4.NnfStorageProfileXFSData, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageProfileXFSData_To_v1alpha4_NnfStorageProfileXFSData(in, out, s) } -func autoConvert_v1alpha3_NnfStorageProfileXFSData_To_v1alpha1_NnfStorageProfileXFSData(in *v1alpha3.NnfStorageProfileXFSData, out *NnfStorageProfileXFSData, s conversion.Scope) error { - if err := Convert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { +func autoConvert_v1alpha4_NnfStorageProfileXFSData_To_v1alpha1_NnfStorageProfileXFSData(in *v1alpha4.NnfStorageProfileXFSData, out *NnfStorageProfileXFSData, s conversion.Scope) error { + if err := Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { return err } out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) @@ -3031,25 +3031,25 @@ func autoConvert_v1alpha3_NnfStorageProfileXFSData_To_v1alpha1_NnfStorageProfile return nil } -// Convert_v1alpha3_NnfStorageProfileXFSData_To_v1alpha1_NnfStorageProfileXFSData is an autogenerated conversion function. 
-func Convert_v1alpha3_NnfStorageProfileXFSData_To_v1alpha1_NnfStorageProfileXFSData(in *v1alpha3.NnfStorageProfileXFSData, out *NnfStorageProfileXFSData, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageProfileXFSData_To_v1alpha1_NnfStorageProfileXFSData(in, out, s) +// Convert_v1alpha4_NnfStorageProfileXFSData_To_v1alpha1_NnfStorageProfileXFSData is an autogenerated conversion function. +func Convert_v1alpha4_NnfStorageProfileXFSData_To_v1alpha1_NnfStorageProfileXFSData(in *v1alpha4.NnfStorageProfileXFSData, out *NnfStorageProfileXFSData, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileXFSData_To_v1alpha1_NnfStorageProfileXFSData(in, out, s) } -func autoConvert_v1alpha1_NnfStorageSpec_To_v1alpha3_NnfStorageSpec(in *NnfStorageSpec, out *v1alpha3.NnfStorageSpec, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfStorageSpec_To_v1alpha4_NnfStorageSpec(in *NnfStorageSpec, out *v1alpha4.NnfStorageSpec, s conversion.Scope) error { out.FileSystemType = in.FileSystemType out.UserID = in.UserID out.GroupID = in.GroupID - out.AllocationSets = *(*[]v1alpha3.NnfStorageAllocationSetSpec)(unsafe.Pointer(&in.AllocationSets)) + out.AllocationSets = *(*[]v1alpha4.NnfStorageAllocationSetSpec)(unsafe.Pointer(&in.AllocationSets)) return nil } -// Convert_v1alpha1_NnfStorageSpec_To_v1alpha3_NnfStorageSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorageSpec_To_v1alpha3_NnfStorageSpec(in *NnfStorageSpec, out *v1alpha3.NnfStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageSpec_To_v1alpha3_NnfStorageSpec(in, out, s) +// Convert_v1alpha1_NnfStorageSpec_To_v1alpha4_NnfStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfStorageSpec_To_v1alpha4_NnfStorageSpec(in *NnfStorageSpec, out *v1alpha4.NnfStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageSpec_To_v1alpha4_NnfStorageSpec(in, out, s) } -func autoConvert_v1alpha3_NnfStorageSpec_To_v1alpha1_NnfStorageSpec(in *v1alpha3.NnfStorageSpec, out *NnfStorageSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageSpec_To_v1alpha1_NnfStorageSpec(in *v1alpha4.NnfStorageSpec, out *NnfStorageSpec, s conversion.Scope) error { out.FileSystemType = in.FileSystemType out.UserID = in.UserID out.GroupID = in.GroupID @@ -3057,28 +3057,28 @@ func autoConvert_v1alpha3_NnfStorageSpec_To_v1alpha1_NnfStorageSpec(in *v1alpha3 return nil } -// Convert_v1alpha3_NnfStorageSpec_To_v1alpha1_NnfStorageSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorageSpec_To_v1alpha1_NnfStorageSpec(in *v1alpha3.NnfStorageSpec, out *NnfStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageSpec_To_v1alpha1_NnfStorageSpec(in, out, s) +// Convert_v1alpha4_NnfStorageSpec_To_v1alpha1_NnfStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageSpec_To_v1alpha1_NnfStorageSpec(in *v1alpha4.NnfStorageSpec, out *NnfStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageSpec_To_v1alpha1_NnfStorageSpec(in, out, s) } -func autoConvert_v1alpha1_NnfStorageStatus_To_v1alpha3_NnfStorageStatus(in *NnfStorageStatus, out *v1alpha3.NnfStorageStatus, s conversion.Scope) error { - if err := Convert_v1alpha1_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreStatus(&in.NnfStorageLustreStatus, &out.NnfStorageLustreStatus, s); err != nil { +func autoConvert_v1alpha1_NnfStorageStatus_To_v1alpha4_NnfStorageStatus(in *NnfStorageStatus, out *v1alpha4.NnfStorageStatus, s conversion.Scope) error { + if err := Convert_v1alpha1_NnfStorageLustreStatus_To_v1alpha4_NnfStorageLustreStatus(&in.NnfStorageLustreStatus, &out.NnfStorageLustreStatus, s); err != nil { return err } - out.AllocationSets = *(*[]v1alpha3.NnfStorageAllocationSetStatus)(unsafe.Pointer(&in.AllocationSets)) + out.AllocationSets = *(*[]v1alpha4.NnfStorageAllocationSetStatus)(unsafe.Pointer(&in.AllocationSets)) out.ResourceError = in.ResourceError out.Ready = in.Ready return nil } -// Convert_v1alpha1_NnfStorageStatus_To_v1alpha3_NnfStorageStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorageStatus_To_v1alpha3_NnfStorageStatus(in *NnfStorageStatus, out *v1alpha3.NnfStorageStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageStatus_To_v1alpha3_NnfStorageStatus(in, out, s) +// Convert_v1alpha1_NnfStorageStatus_To_v1alpha4_NnfStorageStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfStorageStatus_To_v1alpha4_NnfStorageStatus(in *NnfStorageStatus, out *v1alpha4.NnfStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfStorageStatus_To_v1alpha4_NnfStorageStatus(in, out, s) } -func autoConvert_v1alpha3_NnfStorageStatus_To_v1alpha1_NnfStorageStatus(in *v1alpha3.NnfStorageStatus, out *NnfStorageStatus, s conversion.Scope) error { - if err := Convert_v1alpha3_NnfStorageLustreStatus_To_v1alpha1_NnfStorageLustreStatus(&in.NnfStorageLustreStatus, &out.NnfStorageLustreStatus, s); err != nil { +func autoConvert_v1alpha4_NnfStorageStatus_To_v1alpha1_NnfStorageStatus(in *v1alpha4.NnfStorageStatus, out *NnfStorageStatus, s conversion.Scope) error { + if err := Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha1_NnfStorageLustreStatus(&in.NnfStorageLustreStatus, &out.NnfStorageLustreStatus, s); err != nil { return err } out.AllocationSets = *(*[]NnfStorageAllocationSetStatus)(unsafe.Pointer(&in.AllocationSets)) @@ -3087,50 +3087,50 @@ func autoConvert_v1alpha3_NnfStorageStatus_To_v1alpha1_NnfStorageStatus(in *v1al return nil } -// Convert_v1alpha3_NnfStorageStatus_To_v1alpha1_NnfStorageStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorageStatus_To_v1alpha1_NnfStorageStatus(in *v1alpha3.NnfStorageStatus, out *NnfStorageStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageStatus_To_v1alpha1_NnfStorageStatus(in, out, s) +// Convert_v1alpha4_NnfStorageStatus_To_v1alpha1_NnfStorageStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageStatus_To_v1alpha1_NnfStorageStatus(in *v1alpha4.NnfStorageStatus, out *NnfStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageStatus_To_v1alpha1_NnfStorageStatus(in, out, s) } -func autoConvert_v1alpha1_NnfSystemStorage_To_v1alpha3_NnfSystemStorage(in *NnfSystemStorage, out *v1alpha3.NnfSystemStorage, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfSystemStorage_To_v1alpha4_NnfSystemStorage(in *NnfSystemStorage, out *v1alpha4.NnfSystemStorage, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha1_NnfSystemStorageSpec_To_v1alpha4_NnfSystemStorageSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha1_NnfSystemStorageStatus_To_v1alpha3_NnfSystemStorageStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha1_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha1_NnfSystemStorage_To_v1alpha3_NnfSystemStorage is an autogenerated conversion function. -func Convert_v1alpha1_NnfSystemStorage_To_v1alpha3_NnfSystemStorage(in *NnfSystemStorage, out *v1alpha3.NnfSystemStorage, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfSystemStorage_To_v1alpha3_NnfSystemStorage(in, out, s) +// Convert_v1alpha1_NnfSystemStorage_To_v1alpha4_NnfSystemStorage is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfSystemStorage_To_v1alpha4_NnfSystemStorage(in *NnfSystemStorage, out *v1alpha4.NnfSystemStorage, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfSystemStorage_To_v1alpha4_NnfSystemStorage(in, out, s) } -func autoConvert_v1alpha3_NnfSystemStorage_To_v1alpha1_NnfSystemStorage(in *v1alpha3.NnfSystemStorage, out *NnfSystemStorage, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfSystemStorage_To_v1alpha1_NnfSystemStorage(in *v1alpha4.NnfSystemStorage, out *NnfSystemStorage, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfSystemStorageSpec_To_v1alpha1_NnfSystemStorageSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha1_NnfSystemStorageSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha3_NnfSystemStorageStatus_To_v1alpha1_NnfSystemStorageStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha4_NnfSystemStorageStatus_To_v1alpha1_NnfSystemStorageStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfSystemStorage_To_v1alpha1_NnfSystemStorage is an autogenerated conversion function. -func Convert_v1alpha3_NnfSystemStorage_To_v1alpha1_NnfSystemStorage(in *v1alpha3.NnfSystemStorage, out *NnfSystemStorage, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfSystemStorage_To_v1alpha1_NnfSystemStorage(in, out, s) +// Convert_v1alpha4_NnfSystemStorage_To_v1alpha1_NnfSystemStorage is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfSystemStorage_To_v1alpha1_NnfSystemStorage(in *v1alpha4.NnfSystemStorage, out *NnfSystemStorage, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfSystemStorage_To_v1alpha1_NnfSystemStorage(in, out, s) } -func autoConvert_v1alpha1_NnfSystemStorageList_To_v1alpha3_NnfSystemStorageList(in *NnfSystemStorageList, out *v1alpha3.NnfSystemStorageList, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfSystemStorageList_To_v1alpha4_NnfSystemStorageList(in *NnfSystemStorageList, out *v1alpha4.NnfSystemStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]v1alpha3.NnfSystemStorage, len(*in)) + *out = make([]v1alpha4.NnfSystemStorage, len(*in)) for i := range *in { - if err := Convert_v1alpha1_NnfSystemStorage_To_v1alpha3_NnfSystemStorage(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1alpha1_NnfSystemStorage_To_v1alpha4_NnfSystemStorage(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -3140,18 +3140,18 @@ func autoConvert_v1alpha1_NnfSystemStorageList_To_v1alpha3_NnfSystemStorageList( return nil } -// Convert_v1alpha1_NnfSystemStorageList_To_v1alpha3_NnfSystemStorageList is an autogenerated conversion function. -func Convert_v1alpha1_NnfSystemStorageList_To_v1alpha3_NnfSystemStorageList(in *NnfSystemStorageList, out *v1alpha3.NnfSystemStorageList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfSystemStorageList_To_v1alpha3_NnfSystemStorageList(in, out, s) +// Convert_v1alpha1_NnfSystemStorageList_To_v1alpha4_NnfSystemStorageList is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfSystemStorageList_To_v1alpha4_NnfSystemStorageList(in *NnfSystemStorageList, out *v1alpha4.NnfSystemStorageList, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfSystemStorageList_To_v1alpha4_NnfSystemStorageList(in, out, s) } -func autoConvert_v1alpha3_NnfSystemStorageList_To_v1alpha1_NnfSystemStorageList(in *v1alpha3.NnfSystemStorageList, out *NnfSystemStorageList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfSystemStorageList_To_v1alpha1_NnfSystemStorageList(in *v1alpha4.NnfSystemStorageList, out *NnfSystemStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]NnfSystemStorage, len(*in)) for i := range *in { - if err := Convert_v1alpha3_NnfSystemStorage_To_v1alpha1_NnfSystemStorage(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1alpha4_NnfSystemStorage_To_v1alpha1_NnfSystemStorage(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -3161,18 +3161,18 @@ func autoConvert_v1alpha3_NnfSystemStorageList_To_v1alpha1_NnfSystemStorageList( return nil } -// Convert_v1alpha3_NnfSystemStorageList_To_v1alpha1_NnfSystemStorageList is an autogenerated conversion function. -func Convert_v1alpha3_NnfSystemStorageList_To_v1alpha1_NnfSystemStorageList(in *v1alpha3.NnfSystemStorageList, out *NnfSystemStorageList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfSystemStorageList_To_v1alpha1_NnfSystemStorageList(in, out, s) +// Convert_v1alpha4_NnfSystemStorageList_To_v1alpha1_NnfSystemStorageList is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfSystemStorageList_To_v1alpha1_NnfSystemStorageList(in *v1alpha4.NnfSystemStorageList, out *NnfSystemStorageList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfSystemStorageList_To_v1alpha1_NnfSystemStorageList(in, out, s) } -func autoConvert_v1alpha1_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec(in *NnfSystemStorageSpec, out *v1alpha3.NnfSystemStorageSpec, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfSystemStorageSpec_To_v1alpha4_NnfSystemStorageSpec(in *NnfSystemStorageSpec, out *v1alpha4.NnfSystemStorageSpec, s conversion.Scope) error { out.SystemConfiguration = in.SystemConfiguration out.ExcludeRabbits = *(*[]string)(unsafe.Pointer(&in.ExcludeRabbits)) out.IncludeRabbits = *(*[]string)(unsafe.Pointer(&in.IncludeRabbits)) out.ExcludeComputes = *(*[]string)(unsafe.Pointer(&in.ExcludeComputes)) out.IncludeComputes = *(*[]string)(unsafe.Pointer(&in.IncludeComputes)) - out.ComputesTarget = v1alpha3.NnfSystemStorageComputesTarget(in.ComputesTarget) + out.ComputesTarget = v1alpha4.NnfSystemStorageComputesTarget(in.ComputesTarget) out.ComputesPattern = *(*[]int)(unsafe.Pointer(&in.ComputesPattern)) out.Capacity = in.Capacity out.Type = in.Type @@ -3182,12 +3182,12 @@ func autoConvert_v1alpha1_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec( return nil } -// Convert_v1alpha1_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec(in *NnfSystemStorageSpec, out *v1alpha3.NnfSystemStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec(in, out, s) +// Convert_v1alpha1_NnfSystemStorageSpec_To_v1alpha4_NnfSystemStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfSystemStorageSpec_To_v1alpha4_NnfSystemStorageSpec(in *NnfSystemStorageSpec, out *v1alpha4.NnfSystemStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfSystemStorageSpec_To_v1alpha4_NnfSystemStorageSpec(in, out, s) } -func autoConvert_v1alpha3_NnfSystemStorageSpec_To_v1alpha1_NnfSystemStorageSpec(in *v1alpha3.NnfSystemStorageSpec, out *NnfSystemStorageSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfSystemStorageSpec_To_v1alpha1_NnfSystemStorageSpec(in *v1alpha4.NnfSystemStorageSpec, out *NnfSystemStorageSpec, s conversion.Scope) error { out.SystemConfiguration = in.SystemConfiguration out.ExcludeRabbits = *(*[]string)(unsafe.Pointer(&in.ExcludeRabbits)) out.IncludeRabbits = *(*[]string)(unsafe.Pointer(&in.IncludeRabbits)) @@ -3204,24 +3204,24 @@ func autoConvert_v1alpha3_NnfSystemStorageSpec_To_v1alpha1_NnfSystemStorageSpec( return nil } -func autoConvert_v1alpha1_NnfSystemStorageStatus_To_v1alpha3_NnfSystemStorageStatus(in *NnfSystemStorageStatus, out *v1alpha3.NnfSystemStorageStatus, s conversion.Scope) error { +func autoConvert_v1alpha1_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus(in *NnfSystemStorageStatus, out *v1alpha4.NnfSystemStorageStatus, s conversion.Scope) error { out.Ready = in.Ready out.ResourceError = in.ResourceError return nil } -// Convert_v1alpha1_NnfSystemStorageStatus_To_v1alpha3_NnfSystemStorageStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfSystemStorageStatus_To_v1alpha3_NnfSystemStorageStatus(in *NnfSystemStorageStatus, out *v1alpha3.NnfSystemStorageStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfSystemStorageStatus_To_v1alpha3_NnfSystemStorageStatus(in, out, s) +// Convert_v1alpha1_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus(in *NnfSystemStorageStatus, out *v1alpha4.NnfSystemStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus(in, out, s) } -func autoConvert_v1alpha3_NnfSystemStorageStatus_To_v1alpha1_NnfSystemStorageStatus(in *v1alpha3.NnfSystemStorageStatus, out *NnfSystemStorageStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfSystemStorageStatus_To_v1alpha1_NnfSystemStorageStatus(in *v1alpha4.NnfSystemStorageStatus, out *NnfSystemStorageStatus, s conversion.Scope) error { out.Ready = in.Ready out.ResourceError = in.ResourceError return nil } -// Convert_v1alpha3_NnfSystemStorageStatus_To_v1alpha1_NnfSystemStorageStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfSystemStorageStatus_To_v1alpha1_NnfSystemStorageStatus(in *v1alpha3.NnfSystemStorageStatus, out *NnfSystemStorageStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfSystemStorageStatus_To_v1alpha1_NnfSystemStorageStatus(in, out, s) +// Convert_v1alpha4_NnfSystemStorageStatus_To_v1alpha1_NnfSystemStorageStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfSystemStorageStatus_To_v1alpha1_NnfSystemStorageStatus(in *v1alpha4.NnfSystemStorageStatus, out *NnfSystemStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfSystemStorageStatus_To_v1alpha1_NnfSystemStorageStatus(in, out, s) } diff --git a/api/v1alpha2/zz_generated.conversion.go b/api/v1alpha2/zz_generated.conversion.go index 31e804b1..214f3c85 100644 --- a/api/v1alpha2/zz_generated.conversion.go +++ b/api/v1alpha2/zz_generated.conversion.go @@ -28,7 +28,7 @@ import ( unsafe "unsafe" apiv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - v1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" + v1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" v2beta1 "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -43,840 +43,840 @@ func init() { // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes. 
func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*LustreStorageSpec)(nil), (*v1alpha3.LustreStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_LustreStorageSpec_To_v1alpha3_LustreStorageSpec(a.(*LustreStorageSpec), b.(*v1alpha3.LustreStorageSpec), scope) + if err := s.AddGeneratedConversionFunc((*LustreStorageSpec)(nil), (*v1alpha4.LustreStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_LustreStorageSpec_To_v1alpha4_LustreStorageSpec(a.(*LustreStorageSpec), b.(*v1alpha4.LustreStorageSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.LustreStorageSpec)(nil), (*LustreStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_LustreStorageSpec_To_v1alpha2_LustreStorageSpec(a.(*v1alpha3.LustreStorageSpec), b.(*LustreStorageSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.LustreStorageSpec)(nil), (*LustreStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_LustreStorageSpec_To_v1alpha2_LustreStorageSpec(a.(*v1alpha4.LustreStorageSpec), b.(*LustreStorageSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfAccess)(nil), (*v1alpha3.NnfAccess)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfAccess_To_v1alpha3_NnfAccess(a.(*NnfAccess), b.(*v1alpha3.NnfAccess), scope) + if err := s.AddGeneratedConversionFunc((*NnfAccess)(nil), (*v1alpha4.NnfAccess)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfAccess_To_v1alpha4_NnfAccess(a.(*NnfAccess), b.(*v1alpha4.NnfAccess), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfAccess)(nil), (*NnfAccess)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha3_NnfAccess_To_v1alpha2_NnfAccess(a.(*v1alpha3.NnfAccess), b.(*NnfAccess), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfAccess)(nil), (*NnfAccess)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfAccess_To_v1alpha2_NnfAccess(a.(*v1alpha4.NnfAccess), b.(*NnfAccess), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfAccessList)(nil), (*v1alpha3.NnfAccessList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfAccessList_To_v1alpha3_NnfAccessList(a.(*NnfAccessList), b.(*v1alpha3.NnfAccessList), scope) + if err := s.AddGeneratedConversionFunc((*NnfAccessList)(nil), (*v1alpha4.NnfAccessList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfAccessList_To_v1alpha4_NnfAccessList(a.(*NnfAccessList), b.(*v1alpha4.NnfAccessList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfAccessList)(nil), (*NnfAccessList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfAccessList_To_v1alpha2_NnfAccessList(a.(*v1alpha3.NnfAccessList), b.(*NnfAccessList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfAccessList)(nil), (*NnfAccessList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfAccessList_To_v1alpha2_NnfAccessList(a.(*v1alpha4.NnfAccessList), b.(*NnfAccessList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfAccessSpec)(nil), (*v1alpha3.NnfAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(a.(*NnfAccessSpec), b.(*v1alpha3.NnfAccessSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfAccessSpec)(nil), (*v1alpha4.NnfAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha2_NnfAccessSpec_To_v1alpha4_NnfAccessSpec(a.(*NnfAccessSpec), b.(*v1alpha4.NnfAccessSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfAccessSpec)(nil), (*NnfAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(a.(*v1alpha3.NnfAccessSpec), b.(*NnfAccessSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfAccessSpec)(nil), (*NnfAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(a.(*v1alpha4.NnfAccessSpec), b.(*NnfAccessSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfAccessStatus)(nil), (*v1alpha3.NnfAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfAccessStatus_To_v1alpha3_NnfAccessStatus(a.(*NnfAccessStatus), b.(*v1alpha3.NnfAccessStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfAccessStatus)(nil), (*v1alpha4.NnfAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfAccessStatus_To_v1alpha4_NnfAccessStatus(a.(*NnfAccessStatus), b.(*v1alpha4.NnfAccessStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfAccessStatus)(nil), (*NnfAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfAccessStatus_To_v1alpha2_NnfAccessStatus(a.(*v1alpha3.NnfAccessStatus), b.(*NnfAccessStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfAccessStatus)(nil), (*NnfAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfAccessStatus_To_v1alpha2_NnfAccessStatus(a.(*v1alpha4.NnfAccessStatus), b.(*NnfAccessStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfContainerProfile)(nil), 
(*v1alpha3.NnfContainerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfContainerProfile_To_v1alpha3_NnfContainerProfile(a.(*NnfContainerProfile), b.(*v1alpha3.NnfContainerProfile), scope) + if err := s.AddGeneratedConversionFunc((*NnfContainerProfile)(nil), (*v1alpha4.NnfContainerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfContainerProfile_To_v1alpha4_NnfContainerProfile(a.(*NnfContainerProfile), b.(*v1alpha4.NnfContainerProfile), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfContainerProfile)(nil), (*NnfContainerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfContainerProfile_To_v1alpha2_NnfContainerProfile(a.(*v1alpha3.NnfContainerProfile), b.(*NnfContainerProfile), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfContainerProfile)(nil), (*NnfContainerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfContainerProfile_To_v1alpha2_NnfContainerProfile(a.(*v1alpha4.NnfContainerProfile), b.(*NnfContainerProfile), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfContainerProfileData)(nil), (*v1alpha3.NnfContainerProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfContainerProfileData_To_v1alpha3_NnfContainerProfileData(a.(*NnfContainerProfileData), b.(*v1alpha3.NnfContainerProfileData), scope) + if err := s.AddGeneratedConversionFunc((*NnfContainerProfileData)(nil), (*v1alpha4.NnfContainerProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfContainerProfileData_To_v1alpha4_NnfContainerProfileData(a.(*NnfContainerProfileData), b.(*v1alpha4.NnfContainerProfileData), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*v1alpha3.NnfContainerProfileData)(nil), (*NnfContainerProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfContainerProfileData_To_v1alpha2_NnfContainerProfileData(a.(*v1alpha3.NnfContainerProfileData), b.(*NnfContainerProfileData), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfContainerProfileData)(nil), (*NnfContainerProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfContainerProfileData_To_v1alpha2_NnfContainerProfileData(a.(*v1alpha4.NnfContainerProfileData), b.(*NnfContainerProfileData), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfContainerProfileList)(nil), (*v1alpha3.NnfContainerProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfContainerProfileList_To_v1alpha3_NnfContainerProfileList(a.(*NnfContainerProfileList), b.(*v1alpha3.NnfContainerProfileList), scope) + if err := s.AddGeneratedConversionFunc((*NnfContainerProfileList)(nil), (*v1alpha4.NnfContainerProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfContainerProfileList_To_v1alpha4_NnfContainerProfileList(a.(*NnfContainerProfileList), b.(*v1alpha4.NnfContainerProfileList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfContainerProfileList)(nil), (*NnfContainerProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfContainerProfileList_To_v1alpha2_NnfContainerProfileList(a.(*v1alpha3.NnfContainerProfileList), b.(*NnfContainerProfileList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfContainerProfileList)(nil), (*NnfContainerProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha4_NnfContainerProfileList_To_v1alpha2_NnfContainerProfileList(a.(*v1alpha4.NnfContainerProfileList), b.(*NnfContainerProfileList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfContainerProfileStorage)(nil), (*v1alpha3.NnfContainerProfileStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfContainerProfileStorage_To_v1alpha3_NnfContainerProfileStorage(a.(*NnfContainerProfileStorage), b.(*v1alpha3.NnfContainerProfileStorage), scope) + if err := s.AddGeneratedConversionFunc((*NnfContainerProfileStorage)(nil), (*v1alpha4.NnfContainerProfileStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfContainerProfileStorage_To_v1alpha4_NnfContainerProfileStorage(a.(*NnfContainerProfileStorage), b.(*v1alpha4.NnfContainerProfileStorage), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfContainerProfileStorage)(nil), (*NnfContainerProfileStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfContainerProfileStorage_To_v1alpha2_NnfContainerProfileStorage(a.(*v1alpha3.NnfContainerProfileStorage), b.(*NnfContainerProfileStorage), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfContainerProfileStorage)(nil), (*NnfContainerProfileStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfContainerProfileStorage_To_v1alpha2_NnfContainerProfileStorage(a.(*v1alpha4.NnfContainerProfileStorage), b.(*NnfContainerProfileStorage), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDataMovement)(nil), (*v1alpha3.NnfDataMovement)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfDataMovement_To_v1alpha3_NnfDataMovement(a.(*NnfDataMovement), b.(*v1alpha3.NnfDataMovement), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovement)(nil), 
(*v1alpha4.NnfDataMovement)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfDataMovement_To_v1alpha4_NnfDataMovement(a.(*NnfDataMovement), b.(*v1alpha4.NnfDataMovement), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovement)(nil), (*NnfDataMovement)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovement_To_v1alpha2_NnfDataMovement(a.(*v1alpha3.NnfDataMovement), b.(*NnfDataMovement), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovement)(nil), (*NnfDataMovement)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovement_To_v1alpha2_NnfDataMovement(a.(*v1alpha4.NnfDataMovement), b.(*NnfDataMovement), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementCommandStatus)(nil), (*v1alpha3.NnfDataMovementCommandStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfDataMovementCommandStatus_To_v1alpha3_NnfDataMovementCommandStatus(a.(*NnfDataMovementCommandStatus), b.(*v1alpha3.NnfDataMovementCommandStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovementCommandStatus)(nil), (*v1alpha4.NnfDataMovementCommandStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfDataMovementCommandStatus_To_v1alpha4_NnfDataMovementCommandStatus(a.(*NnfDataMovementCommandStatus), b.(*v1alpha4.NnfDataMovementCommandStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovementCommandStatus)(nil), (*NnfDataMovementCommandStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovementCommandStatus_To_v1alpha2_NnfDataMovementCommandStatus(a.(*v1alpha3.NnfDataMovementCommandStatus), b.(*NnfDataMovementCommandStatus), scope) + if err := 
s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementCommandStatus)(nil), (*NnfDataMovementCommandStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementCommandStatus_To_v1alpha2_NnfDataMovementCommandStatus(a.(*v1alpha4.NnfDataMovementCommandStatus), b.(*NnfDataMovementCommandStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementConfig)(nil), (*v1alpha3.NnfDataMovementConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfDataMovementConfig_To_v1alpha3_NnfDataMovementConfig(a.(*NnfDataMovementConfig), b.(*v1alpha3.NnfDataMovementConfig), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovementConfig)(nil), (*v1alpha4.NnfDataMovementConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfDataMovementConfig_To_v1alpha4_NnfDataMovementConfig(a.(*NnfDataMovementConfig), b.(*v1alpha4.NnfDataMovementConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovementConfig)(nil), (*NnfDataMovementConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovementConfig_To_v1alpha2_NnfDataMovementConfig(a.(*v1alpha3.NnfDataMovementConfig), b.(*NnfDataMovementConfig), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementConfig)(nil), (*NnfDataMovementConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementConfig_To_v1alpha2_NnfDataMovementConfig(a.(*v1alpha4.NnfDataMovementConfig), b.(*NnfDataMovementConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementList)(nil), (*v1alpha3.NnfDataMovementList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfDataMovementList_To_v1alpha3_NnfDataMovementList(a.(*NnfDataMovementList), 
b.(*v1alpha3.NnfDataMovementList), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovementList)(nil), (*v1alpha4.NnfDataMovementList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfDataMovementList_To_v1alpha4_NnfDataMovementList(a.(*NnfDataMovementList), b.(*v1alpha4.NnfDataMovementList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovementList)(nil), (*NnfDataMovementList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovementList_To_v1alpha2_NnfDataMovementList(a.(*v1alpha3.NnfDataMovementList), b.(*NnfDataMovementList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementList)(nil), (*NnfDataMovementList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementList_To_v1alpha2_NnfDataMovementList(a.(*v1alpha4.NnfDataMovementList), b.(*NnfDataMovementList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementManager)(nil), (*v1alpha3.NnfDataMovementManager)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfDataMovementManager_To_v1alpha3_NnfDataMovementManager(a.(*NnfDataMovementManager), b.(*v1alpha3.NnfDataMovementManager), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovementManager)(nil), (*v1alpha4.NnfDataMovementManager)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfDataMovementManager_To_v1alpha4_NnfDataMovementManager(a.(*NnfDataMovementManager), b.(*v1alpha4.NnfDataMovementManager), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovementManager)(nil), (*NnfDataMovementManager)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha3_NnfDataMovementManager_To_v1alpha2_NnfDataMovementManager(a.(*v1alpha3.NnfDataMovementManager), b.(*NnfDataMovementManager), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementManager)(nil), (*NnfDataMovementManager)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementManager_To_v1alpha2_NnfDataMovementManager(a.(*v1alpha4.NnfDataMovementManager), b.(*NnfDataMovementManager), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementManagerList)(nil), (*v1alpha3.NnfDataMovementManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfDataMovementManagerList_To_v1alpha3_NnfDataMovementManagerList(a.(*NnfDataMovementManagerList), b.(*v1alpha3.NnfDataMovementManagerList), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovementManagerList)(nil), (*v1alpha4.NnfDataMovementManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfDataMovementManagerList_To_v1alpha4_NnfDataMovementManagerList(a.(*NnfDataMovementManagerList), b.(*v1alpha4.NnfDataMovementManagerList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovementManagerList)(nil), (*NnfDataMovementManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovementManagerList_To_v1alpha2_NnfDataMovementManagerList(a.(*v1alpha3.NnfDataMovementManagerList), b.(*NnfDataMovementManagerList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementManagerList)(nil), (*NnfDataMovementManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementManagerList_To_v1alpha2_NnfDataMovementManagerList(a.(*v1alpha4.NnfDataMovementManagerList), b.(*NnfDataMovementManagerList), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*NnfDataMovementManagerSpec)(nil), (*v1alpha3.NnfDataMovementManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfDataMovementManagerSpec_To_v1alpha3_NnfDataMovementManagerSpec(a.(*NnfDataMovementManagerSpec), b.(*v1alpha3.NnfDataMovementManagerSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovementManagerSpec)(nil), (*v1alpha4.NnfDataMovementManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfDataMovementManagerSpec_To_v1alpha4_NnfDataMovementManagerSpec(a.(*NnfDataMovementManagerSpec), b.(*v1alpha4.NnfDataMovementManagerSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovementManagerSpec)(nil), (*NnfDataMovementManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovementManagerSpec_To_v1alpha2_NnfDataMovementManagerSpec(a.(*v1alpha3.NnfDataMovementManagerSpec), b.(*NnfDataMovementManagerSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementManagerSpec)(nil), (*NnfDataMovementManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementManagerSpec_To_v1alpha2_NnfDataMovementManagerSpec(a.(*v1alpha4.NnfDataMovementManagerSpec), b.(*NnfDataMovementManagerSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementManagerStatus)(nil), (*v1alpha3.NnfDataMovementManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfDataMovementManagerStatus_To_v1alpha3_NnfDataMovementManagerStatus(a.(*NnfDataMovementManagerStatus), b.(*v1alpha3.NnfDataMovementManagerStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovementManagerStatus)(nil), (*v1alpha4.NnfDataMovementManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + 
return Convert_v1alpha2_NnfDataMovementManagerStatus_To_v1alpha4_NnfDataMovementManagerStatus(a.(*NnfDataMovementManagerStatus), b.(*v1alpha4.NnfDataMovementManagerStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovementManagerStatus)(nil), (*NnfDataMovementManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovementManagerStatus_To_v1alpha2_NnfDataMovementManagerStatus(a.(*v1alpha3.NnfDataMovementManagerStatus), b.(*NnfDataMovementManagerStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementManagerStatus)(nil), (*NnfDataMovementManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementManagerStatus_To_v1alpha2_NnfDataMovementManagerStatus(a.(*v1alpha4.NnfDataMovementManagerStatus), b.(*NnfDataMovementManagerStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementProfile)(nil), (*v1alpha3.NnfDataMovementProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfDataMovementProfile_To_v1alpha3_NnfDataMovementProfile(a.(*NnfDataMovementProfile), b.(*v1alpha3.NnfDataMovementProfile), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovementProfile)(nil), (*v1alpha4.NnfDataMovementProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfDataMovementProfile_To_v1alpha4_NnfDataMovementProfile(a.(*NnfDataMovementProfile), b.(*v1alpha4.NnfDataMovementProfile), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovementProfile)(nil), (*NnfDataMovementProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovementProfile_To_v1alpha2_NnfDataMovementProfile(a.(*v1alpha3.NnfDataMovementProfile), b.(*NnfDataMovementProfile), scope) + if err := 
s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementProfile)(nil), (*NnfDataMovementProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementProfile_To_v1alpha2_NnfDataMovementProfile(a.(*v1alpha4.NnfDataMovementProfile), b.(*NnfDataMovementProfile), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementProfileData)(nil), (*v1alpha3.NnfDataMovementProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData(a.(*NnfDataMovementProfileData), b.(*v1alpha3.NnfDataMovementProfileData), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovementProfileData)(nil), (*v1alpha4.NnfDataMovementProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfDataMovementProfileData_To_v1alpha4_NnfDataMovementProfileData(a.(*NnfDataMovementProfileData), b.(*v1alpha4.NnfDataMovementProfileData), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovementProfileData)(nil), (*NnfDataMovementProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData(a.(*v1alpha3.NnfDataMovementProfileData), b.(*NnfDataMovementProfileData), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementProfileData)(nil), (*NnfDataMovementProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData(a.(*v1alpha4.NnfDataMovementProfileData), b.(*NnfDataMovementProfileData), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementProfileList)(nil), (*v1alpha3.NnfDataMovementProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha2_NnfDataMovementProfileList_To_v1alpha3_NnfDataMovementProfileList(a.(*NnfDataMovementProfileList), b.(*v1alpha3.NnfDataMovementProfileList), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovementProfileList)(nil), (*v1alpha4.NnfDataMovementProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfDataMovementProfileList_To_v1alpha4_NnfDataMovementProfileList(a.(*NnfDataMovementProfileList), b.(*v1alpha4.NnfDataMovementProfileList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovementProfileList)(nil), (*NnfDataMovementProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovementProfileList_To_v1alpha2_NnfDataMovementProfileList(a.(*v1alpha3.NnfDataMovementProfileList), b.(*NnfDataMovementProfileList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementProfileList)(nil), (*NnfDataMovementProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementProfileList_To_v1alpha2_NnfDataMovementProfileList(a.(*v1alpha4.NnfDataMovementProfileList), b.(*NnfDataMovementProfileList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementSpec)(nil), (*v1alpha3.NnfDataMovementSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfDataMovementSpec_To_v1alpha3_NnfDataMovementSpec(a.(*NnfDataMovementSpec), b.(*v1alpha3.NnfDataMovementSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovementSpec)(nil), (*v1alpha4.NnfDataMovementSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfDataMovementSpec_To_v1alpha4_NnfDataMovementSpec(a.(*NnfDataMovementSpec), b.(*v1alpha4.NnfDataMovementSpec), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovementSpec)(nil), (*NnfDataMovementSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovementSpec_To_v1alpha2_NnfDataMovementSpec(a.(*v1alpha3.NnfDataMovementSpec), b.(*NnfDataMovementSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementSpec)(nil), (*NnfDataMovementSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementSpec_To_v1alpha2_NnfDataMovementSpec(a.(*v1alpha4.NnfDataMovementSpec), b.(*NnfDataMovementSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementSpecSourceDestination)(nil), (*v1alpha3.NnfDataMovementSpecSourceDestination)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfDataMovementSpecSourceDestination_To_v1alpha3_NnfDataMovementSpecSourceDestination(a.(*NnfDataMovementSpecSourceDestination), b.(*v1alpha3.NnfDataMovementSpecSourceDestination), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovementSpecSourceDestination)(nil), (*v1alpha4.NnfDataMovementSpecSourceDestination)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfDataMovementSpecSourceDestination_To_v1alpha4_NnfDataMovementSpecSourceDestination(a.(*NnfDataMovementSpecSourceDestination), b.(*v1alpha4.NnfDataMovementSpecSourceDestination), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovementSpecSourceDestination)(nil), (*NnfDataMovementSpecSourceDestination)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovementSpecSourceDestination_To_v1alpha2_NnfDataMovementSpecSourceDestination(a.(*v1alpha3.NnfDataMovementSpecSourceDestination), b.(*NnfDataMovementSpecSourceDestination), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementSpecSourceDestination)(nil), 
(*NnfDataMovementSpecSourceDestination)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementSpecSourceDestination_To_v1alpha2_NnfDataMovementSpecSourceDestination(a.(*v1alpha4.NnfDataMovementSpecSourceDestination), b.(*NnfDataMovementSpecSourceDestination), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementStatus)(nil), (*v1alpha3.NnfDataMovementStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfDataMovementStatus_To_v1alpha3_NnfDataMovementStatus(a.(*NnfDataMovementStatus), b.(*v1alpha3.NnfDataMovementStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfDataMovementStatus)(nil), (*v1alpha4.NnfDataMovementStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfDataMovementStatus_To_v1alpha4_NnfDataMovementStatus(a.(*NnfDataMovementStatus), b.(*v1alpha4.NnfDataMovementStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDataMovementStatus)(nil), (*NnfDataMovementStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDataMovementStatus_To_v1alpha2_NnfDataMovementStatus(a.(*v1alpha3.NnfDataMovementStatus), b.(*NnfDataMovementStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementStatus)(nil), (*NnfDataMovementStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementStatus_To_v1alpha2_NnfDataMovementStatus(a.(*v1alpha4.NnfDataMovementStatus), b.(*NnfDataMovementStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfDriveStatus)(nil), (*v1alpha3.NnfDriveStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfDriveStatus_To_v1alpha3_NnfDriveStatus(a.(*NnfDriveStatus), b.(*v1alpha3.NnfDriveStatus), scope) + if err := 
s.AddGeneratedConversionFunc((*NnfDriveStatus)(nil), (*v1alpha4.NnfDriveStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfDriveStatus_To_v1alpha4_NnfDriveStatus(a.(*NnfDriveStatus), b.(*v1alpha4.NnfDriveStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfDriveStatus)(nil), (*NnfDriveStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfDriveStatus_To_v1alpha2_NnfDriveStatus(a.(*v1alpha3.NnfDriveStatus), b.(*NnfDriveStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDriveStatus)(nil), (*NnfDriveStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDriveStatus_To_v1alpha2_NnfDriveStatus(a.(*v1alpha4.NnfDriveStatus), b.(*NnfDriveStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfLustreMGT)(nil), (*v1alpha3.NnfLustreMGT)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfLustreMGT_To_v1alpha3_NnfLustreMGT(a.(*NnfLustreMGT), b.(*v1alpha3.NnfLustreMGT), scope) + if err := s.AddGeneratedConversionFunc((*NnfLustreMGT)(nil), (*v1alpha4.NnfLustreMGT)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfLustreMGT_To_v1alpha4_NnfLustreMGT(a.(*NnfLustreMGT), b.(*v1alpha4.NnfLustreMGT), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfLustreMGT)(nil), (*NnfLustreMGT)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfLustreMGT_To_v1alpha2_NnfLustreMGT(a.(*v1alpha3.NnfLustreMGT), b.(*NnfLustreMGT), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfLustreMGT)(nil), (*NnfLustreMGT)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfLustreMGT_To_v1alpha2_NnfLustreMGT(a.(*v1alpha4.NnfLustreMGT), b.(*NnfLustreMGT), scope) }); err != nil { 
return err } - if err := s.AddGeneratedConversionFunc((*NnfLustreMGTList)(nil), (*v1alpha3.NnfLustreMGTList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfLustreMGTList_To_v1alpha3_NnfLustreMGTList(a.(*NnfLustreMGTList), b.(*v1alpha3.NnfLustreMGTList), scope) + if err := s.AddGeneratedConversionFunc((*NnfLustreMGTList)(nil), (*v1alpha4.NnfLustreMGTList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfLustreMGTList_To_v1alpha4_NnfLustreMGTList(a.(*NnfLustreMGTList), b.(*v1alpha4.NnfLustreMGTList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfLustreMGTList)(nil), (*NnfLustreMGTList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfLustreMGTList_To_v1alpha2_NnfLustreMGTList(a.(*v1alpha3.NnfLustreMGTList), b.(*NnfLustreMGTList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfLustreMGTList)(nil), (*NnfLustreMGTList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfLustreMGTList_To_v1alpha2_NnfLustreMGTList(a.(*v1alpha4.NnfLustreMGTList), b.(*NnfLustreMGTList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfLustreMGTSpec)(nil), (*v1alpha3.NnfLustreMGTSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfLustreMGTSpec_To_v1alpha3_NnfLustreMGTSpec(a.(*NnfLustreMGTSpec), b.(*v1alpha3.NnfLustreMGTSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfLustreMGTSpec)(nil), (*v1alpha4.NnfLustreMGTSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfLustreMGTSpec_To_v1alpha4_NnfLustreMGTSpec(a.(*NnfLustreMGTSpec), b.(*v1alpha4.NnfLustreMGTSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfLustreMGTSpec)(nil), (*NnfLustreMGTSpec)(nil), func(a, b interface{}, scope conversion.Scope) 
error { - return Convert_v1alpha3_NnfLustreMGTSpec_To_v1alpha2_NnfLustreMGTSpec(a.(*v1alpha3.NnfLustreMGTSpec), b.(*NnfLustreMGTSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfLustreMGTSpec)(nil), (*NnfLustreMGTSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfLustreMGTSpec_To_v1alpha2_NnfLustreMGTSpec(a.(*v1alpha4.NnfLustreMGTSpec), b.(*NnfLustreMGTSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfLustreMGTStatus)(nil), (*v1alpha3.NnfLustreMGTStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfLustreMGTStatus_To_v1alpha3_NnfLustreMGTStatus(a.(*NnfLustreMGTStatus), b.(*v1alpha3.NnfLustreMGTStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfLustreMGTStatus)(nil), (*v1alpha4.NnfLustreMGTStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfLustreMGTStatus_To_v1alpha4_NnfLustreMGTStatus(a.(*NnfLustreMGTStatus), b.(*v1alpha4.NnfLustreMGTStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfLustreMGTStatus)(nil), (*NnfLustreMGTStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfLustreMGTStatus_To_v1alpha2_NnfLustreMGTStatus(a.(*v1alpha3.NnfLustreMGTStatus), b.(*NnfLustreMGTStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfLustreMGTStatus)(nil), (*NnfLustreMGTStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfLustreMGTStatus_To_v1alpha2_NnfLustreMGTStatus(a.(*v1alpha4.NnfLustreMGTStatus), b.(*NnfLustreMGTStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfLustreMGTStatusClaim)(nil), (*v1alpha3.NnfLustreMGTStatusClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha2_NnfLustreMGTStatusClaim_To_v1alpha3_NnfLustreMGTStatusClaim(a.(*NnfLustreMGTStatusClaim), b.(*v1alpha3.NnfLustreMGTStatusClaim), scope) + if err := s.AddGeneratedConversionFunc((*NnfLustreMGTStatusClaim)(nil), (*v1alpha4.NnfLustreMGTStatusClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfLustreMGTStatusClaim_To_v1alpha4_NnfLustreMGTStatusClaim(a.(*NnfLustreMGTStatusClaim), b.(*v1alpha4.NnfLustreMGTStatusClaim), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfLustreMGTStatusClaim)(nil), (*NnfLustreMGTStatusClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfLustreMGTStatusClaim_To_v1alpha2_NnfLustreMGTStatusClaim(a.(*v1alpha3.NnfLustreMGTStatusClaim), b.(*NnfLustreMGTStatusClaim), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfLustreMGTStatusClaim)(nil), (*NnfLustreMGTStatusClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfLustreMGTStatusClaim_To_v1alpha2_NnfLustreMGTStatusClaim(a.(*v1alpha4.NnfLustreMGTStatusClaim), b.(*NnfLustreMGTStatusClaim), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNode)(nil), (*v1alpha3.NnfNode)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfNode_To_v1alpha3_NnfNode(a.(*NnfNode), b.(*v1alpha3.NnfNode), scope) + if err := s.AddGeneratedConversionFunc((*NnfNode)(nil), (*v1alpha4.NnfNode)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfNode_To_v1alpha4_NnfNode(a.(*NnfNode), b.(*v1alpha4.NnfNode), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNode)(nil), (*NnfNode)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNode_To_v1alpha2_NnfNode(a.(*v1alpha3.NnfNode), b.(*NnfNode), scope) + if err := 
s.AddGeneratedConversionFunc((*v1alpha4.NnfNode)(nil), (*NnfNode)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNode_To_v1alpha2_NnfNode(a.(*v1alpha4.NnfNode), b.(*NnfNode), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorage)(nil), (*v1alpha3.NnfNodeBlockStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfNodeBlockStorage_To_v1alpha3_NnfNodeBlockStorage(a.(*NnfNodeBlockStorage), b.(*v1alpha3.NnfNodeBlockStorage), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorage)(nil), (*v1alpha4.NnfNodeBlockStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfNodeBlockStorage_To_v1alpha4_NnfNodeBlockStorage(a.(*NnfNodeBlockStorage), b.(*v1alpha4.NnfNodeBlockStorage), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeBlockStorage)(nil), (*NnfNodeBlockStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeBlockStorage_To_v1alpha2_NnfNodeBlockStorage(a.(*v1alpha3.NnfNodeBlockStorage), b.(*NnfNodeBlockStorage), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorage)(nil), (*NnfNodeBlockStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeBlockStorage_To_v1alpha2_NnfNodeBlockStorage(a.(*v1alpha4.NnfNodeBlockStorage), b.(*NnfNodeBlockStorage), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageAccessStatus)(nil), (*v1alpha3.NnfNodeBlockStorageAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfNodeBlockStorageAccessStatus_To_v1alpha3_NnfNodeBlockStorageAccessStatus(a.(*NnfNodeBlockStorageAccessStatus), b.(*v1alpha3.NnfNodeBlockStorageAccessStatus), scope) + if err := 
s.AddGeneratedConversionFunc((*NnfNodeBlockStorageAccessStatus)(nil), (*v1alpha4.NnfNodeBlockStorageAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfNodeBlockStorageAccessStatus_To_v1alpha4_NnfNodeBlockStorageAccessStatus(a.(*NnfNodeBlockStorageAccessStatus), b.(*v1alpha4.NnfNodeBlockStorageAccessStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeBlockStorageAccessStatus)(nil), (*NnfNodeBlockStorageAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeBlockStorageAccessStatus_To_v1alpha2_NnfNodeBlockStorageAccessStatus(a.(*v1alpha3.NnfNodeBlockStorageAccessStatus), b.(*NnfNodeBlockStorageAccessStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageAccessStatus)(nil), (*NnfNodeBlockStorageAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeBlockStorageAccessStatus_To_v1alpha2_NnfNodeBlockStorageAccessStatus(a.(*v1alpha4.NnfNodeBlockStorageAccessStatus), b.(*NnfNodeBlockStorageAccessStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageAllocationSpec)(nil), (*v1alpha3.NnfNodeBlockStorageAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfNodeBlockStorageAllocationSpec_To_v1alpha3_NnfNodeBlockStorageAllocationSpec(a.(*NnfNodeBlockStorageAllocationSpec), b.(*v1alpha3.NnfNodeBlockStorageAllocationSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageAllocationSpec)(nil), (*v1alpha4.NnfNodeBlockStorageAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfNodeBlockStorageAllocationSpec_To_v1alpha4_NnfNodeBlockStorageAllocationSpec(a.(*NnfNodeBlockStorageAllocationSpec), b.(*v1alpha4.NnfNodeBlockStorageAllocationSpec), scope) }); err != nil { 
return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeBlockStorageAllocationSpec)(nil), (*NnfNodeBlockStorageAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeBlockStorageAllocationSpec_To_v1alpha2_NnfNodeBlockStorageAllocationSpec(a.(*v1alpha3.NnfNodeBlockStorageAllocationSpec), b.(*NnfNodeBlockStorageAllocationSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageAllocationSpec)(nil), (*NnfNodeBlockStorageAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeBlockStorageAllocationSpec_To_v1alpha2_NnfNodeBlockStorageAllocationSpec(a.(*v1alpha4.NnfNodeBlockStorageAllocationSpec), b.(*NnfNodeBlockStorageAllocationSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageAllocationStatus)(nil), (*v1alpha3.NnfNodeBlockStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfNodeBlockStorageAllocationStatus_To_v1alpha3_NnfNodeBlockStorageAllocationStatus(a.(*NnfNodeBlockStorageAllocationStatus), b.(*v1alpha3.NnfNodeBlockStorageAllocationStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageAllocationStatus)(nil), (*v1alpha4.NnfNodeBlockStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfNodeBlockStorageAllocationStatus_To_v1alpha4_NnfNodeBlockStorageAllocationStatus(a.(*NnfNodeBlockStorageAllocationStatus), b.(*v1alpha4.NnfNodeBlockStorageAllocationStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeBlockStorageAllocationStatus)(nil), (*NnfNodeBlockStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha3_NnfNodeBlockStorageAllocationStatus_To_v1alpha2_NnfNodeBlockStorageAllocationStatus(a.(*v1alpha3.NnfNodeBlockStorageAllocationStatus), b.(*NnfNodeBlockStorageAllocationStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageAllocationStatus)(nil), (*NnfNodeBlockStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeBlockStorageAllocationStatus_To_v1alpha2_NnfNodeBlockStorageAllocationStatus(a.(*v1alpha4.NnfNodeBlockStorageAllocationStatus), b.(*NnfNodeBlockStorageAllocationStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageDeviceStatus)(nil), (*v1alpha3.NnfNodeBlockStorageDeviceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfNodeBlockStorageDeviceStatus_To_v1alpha3_NnfNodeBlockStorageDeviceStatus(a.(*NnfNodeBlockStorageDeviceStatus), b.(*v1alpha3.NnfNodeBlockStorageDeviceStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageDeviceStatus)(nil), (*v1alpha4.NnfNodeBlockStorageDeviceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfNodeBlockStorageDeviceStatus_To_v1alpha4_NnfNodeBlockStorageDeviceStatus(a.(*NnfNodeBlockStorageDeviceStatus), b.(*v1alpha4.NnfNodeBlockStorageDeviceStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeBlockStorageDeviceStatus)(nil), (*NnfNodeBlockStorageDeviceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeBlockStorageDeviceStatus_To_v1alpha2_NnfNodeBlockStorageDeviceStatus(a.(*v1alpha3.NnfNodeBlockStorageDeviceStatus), b.(*NnfNodeBlockStorageDeviceStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageDeviceStatus)(nil), (*NnfNodeBlockStorageDeviceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + 
return Convert_v1alpha4_NnfNodeBlockStorageDeviceStatus_To_v1alpha2_NnfNodeBlockStorageDeviceStatus(a.(*v1alpha4.NnfNodeBlockStorageDeviceStatus), b.(*NnfNodeBlockStorageDeviceStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageList)(nil), (*v1alpha3.NnfNodeBlockStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfNodeBlockStorageList_To_v1alpha3_NnfNodeBlockStorageList(a.(*NnfNodeBlockStorageList), b.(*v1alpha3.NnfNodeBlockStorageList), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageList)(nil), (*v1alpha4.NnfNodeBlockStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfNodeBlockStorageList_To_v1alpha4_NnfNodeBlockStorageList(a.(*NnfNodeBlockStorageList), b.(*v1alpha4.NnfNodeBlockStorageList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeBlockStorageList)(nil), (*NnfNodeBlockStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeBlockStorageList_To_v1alpha2_NnfNodeBlockStorageList(a.(*v1alpha3.NnfNodeBlockStorageList), b.(*NnfNodeBlockStorageList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageList)(nil), (*NnfNodeBlockStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeBlockStorageList_To_v1alpha2_NnfNodeBlockStorageList(a.(*v1alpha4.NnfNodeBlockStorageList), b.(*NnfNodeBlockStorageList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageSpec)(nil), (*v1alpha3.NnfNodeBlockStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfNodeBlockStorageSpec_To_v1alpha3_NnfNodeBlockStorageSpec(a.(*NnfNodeBlockStorageSpec), b.(*v1alpha3.NnfNodeBlockStorageSpec), scope) + if err := 
s.AddGeneratedConversionFunc((*NnfNodeBlockStorageSpec)(nil), (*v1alpha4.NnfNodeBlockStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfNodeBlockStorageSpec_To_v1alpha4_NnfNodeBlockStorageSpec(a.(*NnfNodeBlockStorageSpec), b.(*v1alpha4.NnfNodeBlockStorageSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeBlockStorageSpec)(nil), (*NnfNodeBlockStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeBlockStorageSpec_To_v1alpha2_NnfNodeBlockStorageSpec(a.(*v1alpha3.NnfNodeBlockStorageSpec), b.(*NnfNodeBlockStorageSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageSpec)(nil), (*NnfNodeBlockStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeBlockStorageSpec_To_v1alpha2_NnfNodeBlockStorageSpec(a.(*v1alpha4.NnfNodeBlockStorageSpec), b.(*NnfNodeBlockStorageSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageStatus)(nil), (*v1alpha3.NnfNodeBlockStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfNodeBlockStorageStatus_To_v1alpha3_NnfNodeBlockStorageStatus(a.(*NnfNodeBlockStorageStatus), b.(*v1alpha3.NnfNodeBlockStorageStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageStatus)(nil), (*v1alpha4.NnfNodeBlockStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfNodeBlockStorageStatus_To_v1alpha4_NnfNodeBlockStorageStatus(a.(*NnfNodeBlockStorageStatus), b.(*v1alpha4.NnfNodeBlockStorageStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeBlockStorageStatus)(nil), (*NnfNodeBlockStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha3_NnfNodeBlockStorageStatus_To_v1alpha2_NnfNodeBlockStorageStatus(a.(*v1alpha3.NnfNodeBlockStorageStatus), b.(*NnfNodeBlockStorageStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageStatus)(nil), (*NnfNodeBlockStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeBlockStorageStatus_To_v1alpha2_NnfNodeBlockStorageStatus(a.(*v1alpha4.NnfNodeBlockStorageStatus), b.(*NnfNodeBlockStorageStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeECData)(nil), (*v1alpha3.NnfNodeECData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfNodeECData_To_v1alpha3_NnfNodeECData(a.(*NnfNodeECData), b.(*v1alpha3.NnfNodeECData), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeECData)(nil), (*v1alpha4.NnfNodeECData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfNodeECData_To_v1alpha4_NnfNodeECData(a.(*NnfNodeECData), b.(*v1alpha4.NnfNodeECData), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeECData)(nil), (*NnfNodeECData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeECData_To_v1alpha2_NnfNodeECData(a.(*v1alpha3.NnfNodeECData), b.(*NnfNodeECData), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeECData)(nil), (*NnfNodeECData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeECData_To_v1alpha2_NnfNodeECData(a.(*v1alpha4.NnfNodeECData), b.(*NnfNodeECData), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeECDataList)(nil), (*v1alpha3.NnfNodeECDataList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfNodeECDataList_To_v1alpha3_NnfNodeECDataList(a.(*NnfNodeECDataList), b.(*v1alpha3.NnfNodeECDataList), scope) + if err := 
s.AddGeneratedConversionFunc((*NnfNodeECDataList)(nil), (*v1alpha4.NnfNodeECDataList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfNodeECDataList_To_v1alpha4_NnfNodeECDataList(a.(*NnfNodeECDataList), b.(*v1alpha4.NnfNodeECDataList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeECDataList)(nil), (*NnfNodeECDataList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeECDataList_To_v1alpha2_NnfNodeECDataList(a.(*v1alpha3.NnfNodeECDataList), b.(*NnfNodeECDataList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeECDataList)(nil), (*NnfNodeECDataList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeECDataList_To_v1alpha2_NnfNodeECDataList(a.(*v1alpha4.NnfNodeECDataList), b.(*NnfNodeECDataList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeECDataSpec)(nil), (*v1alpha3.NnfNodeECDataSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfNodeECDataSpec_To_v1alpha3_NnfNodeECDataSpec(a.(*NnfNodeECDataSpec), b.(*v1alpha3.NnfNodeECDataSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeECDataSpec)(nil), (*v1alpha4.NnfNodeECDataSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfNodeECDataSpec_To_v1alpha4_NnfNodeECDataSpec(a.(*NnfNodeECDataSpec), b.(*v1alpha4.NnfNodeECDataSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeECDataSpec)(nil), (*NnfNodeECDataSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeECDataSpec_To_v1alpha2_NnfNodeECDataSpec(a.(*v1alpha3.NnfNodeECDataSpec), b.(*NnfNodeECDataSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeECDataSpec)(nil), (*NnfNodeECDataSpec)(nil), func(a, b interface{}, scope 
conversion.Scope) error { + return Convert_v1alpha4_NnfNodeECDataSpec_To_v1alpha2_NnfNodeECDataSpec(a.(*v1alpha4.NnfNodeECDataSpec), b.(*NnfNodeECDataSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeECDataStatus)(nil), (*v1alpha3.NnfNodeECDataStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfNodeECDataStatus_To_v1alpha3_NnfNodeECDataStatus(a.(*NnfNodeECDataStatus), b.(*v1alpha3.NnfNodeECDataStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeECDataStatus)(nil), (*v1alpha4.NnfNodeECDataStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfNodeECDataStatus_To_v1alpha4_NnfNodeECDataStatus(a.(*NnfNodeECDataStatus), b.(*v1alpha4.NnfNodeECDataStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeECDataStatus)(nil), (*NnfNodeECDataStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeECDataStatus_To_v1alpha2_NnfNodeECDataStatus(a.(*v1alpha3.NnfNodeECDataStatus), b.(*NnfNodeECDataStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeECDataStatus)(nil), (*NnfNodeECDataStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeECDataStatus_To_v1alpha2_NnfNodeECDataStatus(a.(*v1alpha4.NnfNodeECDataStatus), b.(*NnfNodeECDataStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeList)(nil), (*v1alpha3.NnfNodeList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfNodeList_To_v1alpha3_NnfNodeList(a.(*NnfNodeList), b.(*v1alpha3.NnfNodeList), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeList)(nil), (*v1alpha4.NnfNodeList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfNodeList_To_v1alpha4_NnfNodeList(a.(*NnfNodeList), 
b.(*v1alpha4.NnfNodeList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeList)(nil), (*NnfNodeList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeList_To_v1alpha2_NnfNodeList(a.(*v1alpha3.NnfNodeList), b.(*NnfNodeList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeList)(nil), (*NnfNodeList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeList_To_v1alpha2_NnfNodeList(a.(*v1alpha4.NnfNodeList), b.(*NnfNodeList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeSpec)(nil), (*v1alpha3.NnfNodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfNodeSpec_To_v1alpha3_NnfNodeSpec(a.(*NnfNodeSpec), b.(*v1alpha3.NnfNodeSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeSpec)(nil), (*v1alpha4.NnfNodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfNodeSpec_To_v1alpha4_NnfNodeSpec(a.(*NnfNodeSpec), b.(*v1alpha4.NnfNodeSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeSpec)(nil), (*NnfNodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeSpec_To_v1alpha2_NnfNodeSpec(a.(*v1alpha3.NnfNodeSpec), b.(*NnfNodeSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeSpec)(nil), (*NnfNodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeSpec_To_v1alpha2_NnfNodeSpec(a.(*v1alpha4.NnfNodeSpec), b.(*NnfNodeSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeStatus)(nil), (*v1alpha3.NnfNodeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfNodeStatus_To_v1alpha3_NnfNodeStatus(a.(*NnfNodeStatus), b.(*v1alpha3.NnfNodeStatus), scope) + if 
err := s.AddGeneratedConversionFunc((*NnfNodeStatus)(nil), (*v1alpha4.NnfNodeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfNodeStatus_To_v1alpha4_NnfNodeStatus(a.(*NnfNodeStatus), b.(*v1alpha4.NnfNodeStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeStatus)(nil), (*NnfNodeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeStatus_To_v1alpha2_NnfNodeStatus(a.(*v1alpha3.NnfNodeStatus), b.(*NnfNodeStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeStatus)(nil), (*NnfNodeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeStatus_To_v1alpha2_NnfNodeStatus(a.(*v1alpha4.NnfNodeStatus), b.(*NnfNodeStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeStorage)(nil), (*v1alpha3.NnfNodeStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfNodeStorage_To_v1alpha3_NnfNodeStorage(a.(*NnfNodeStorage), b.(*v1alpha3.NnfNodeStorage), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeStorage)(nil), (*v1alpha4.NnfNodeStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfNodeStorage_To_v1alpha4_NnfNodeStorage(a.(*NnfNodeStorage), b.(*v1alpha4.NnfNodeStorage), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeStorage)(nil), (*NnfNodeStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeStorage_To_v1alpha2_NnfNodeStorage(a.(*v1alpha3.NnfNodeStorage), b.(*NnfNodeStorage), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeStorage)(nil), (*NnfNodeStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeStorage_To_v1alpha2_NnfNodeStorage(a.(*v1alpha4.NnfNodeStorage), 
b.(*NnfNodeStorage), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeStorageAllocationStatus)(nil), (*v1alpha3.NnfNodeStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfNodeStorageAllocationStatus_To_v1alpha3_NnfNodeStorageAllocationStatus(a.(*NnfNodeStorageAllocationStatus), b.(*v1alpha3.NnfNodeStorageAllocationStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeStorageAllocationStatus)(nil), (*v1alpha4.NnfNodeStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfNodeStorageAllocationStatus_To_v1alpha4_NnfNodeStorageAllocationStatus(a.(*NnfNodeStorageAllocationStatus), b.(*v1alpha4.NnfNodeStorageAllocationStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeStorageAllocationStatus)(nil), (*NnfNodeStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeStorageAllocationStatus_To_v1alpha2_NnfNodeStorageAllocationStatus(a.(*v1alpha3.NnfNodeStorageAllocationStatus), b.(*NnfNodeStorageAllocationStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeStorageAllocationStatus)(nil), (*NnfNodeStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeStorageAllocationStatus_To_v1alpha2_NnfNodeStorageAllocationStatus(a.(*v1alpha4.NnfNodeStorageAllocationStatus), b.(*NnfNodeStorageAllocationStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeStorageList)(nil), (*v1alpha3.NnfNodeStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfNodeStorageList_To_v1alpha3_NnfNodeStorageList(a.(*NnfNodeStorageList), b.(*v1alpha3.NnfNodeStorageList), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeStorageList)(nil), 
(*v1alpha4.NnfNodeStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfNodeStorageList_To_v1alpha4_NnfNodeStorageList(a.(*NnfNodeStorageList), b.(*v1alpha4.NnfNodeStorageList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeStorageList)(nil), (*NnfNodeStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeStorageList_To_v1alpha2_NnfNodeStorageList(a.(*v1alpha3.NnfNodeStorageList), b.(*NnfNodeStorageList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeStorageList)(nil), (*NnfNodeStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeStorageList_To_v1alpha2_NnfNodeStorageList(a.(*v1alpha4.NnfNodeStorageList), b.(*NnfNodeStorageList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeStorageSpec)(nil), (*v1alpha3.NnfNodeStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfNodeStorageSpec_To_v1alpha3_NnfNodeStorageSpec(a.(*NnfNodeStorageSpec), b.(*v1alpha3.NnfNodeStorageSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeStorageSpec)(nil), (*v1alpha4.NnfNodeStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfNodeStorageSpec_To_v1alpha4_NnfNodeStorageSpec(a.(*NnfNodeStorageSpec), b.(*v1alpha4.NnfNodeStorageSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeStorageSpec)(nil), (*NnfNodeStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeStorageSpec_To_v1alpha2_NnfNodeStorageSpec(a.(*v1alpha3.NnfNodeStorageSpec), b.(*NnfNodeStorageSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeStorageSpec)(nil), (*NnfNodeStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { 
+ return Convert_v1alpha4_NnfNodeStorageSpec_To_v1alpha2_NnfNodeStorageSpec(a.(*v1alpha4.NnfNodeStorageSpec), b.(*NnfNodeStorageSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfNodeStorageStatus)(nil), (*v1alpha3.NnfNodeStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfNodeStorageStatus_To_v1alpha3_NnfNodeStorageStatus(a.(*NnfNodeStorageStatus), b.(*v1alpha3.NnfNodeStorageStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfNodeStorageStatus)(nil), (*v1alpha4.NnfNodeStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfNodeStorageStatus_To_v1alpha4_NnfNodeStorageStatus(a.(*NnfNodeStorageStatus), b.(*v1alpha4.NnfNodeStorageStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfNodeStorageStatus)(nil), (*NnfNodeStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfNodeStorageStatus_To_v1alpha2_NnfNodeStorageStatus(a.(*v1alpha3.NnfNodeStorageStatus), b.(*NnfNodeStorageStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeStorageStatus)(nil), (*NnfNodeStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeStorageStatus_To_v1alpha2_NnfNodeStorageStatus(a.(*v1alpha4.NnfNodeStorageStatus), b.(*NnfNodeStorageStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfPortManager)(nil), (*v1alpha3.NnfPortManager)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfPortManager_To_v1alpha3_NnfPortManager(a.(*NnfPortManager), b.(*v1alpha3.NnfPortManager), scope) + if err := s.AddGeneratedConversionFunc((*NnfPortManager)(nil), (*v1alpha4.NnfPortManager)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha2_NnfPortManager_To_v1alpha4_NnfPortManager(a.(*NnfPortManager), b.(*v1alpha4.NnfPortManager), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfPortManager)(nil), (*NnfPortManager)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfPortManager_To_v1alpha2_NnfPortManager(a.(*v1alpha3.NnfPortManager), b.(*NnfPortManager), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfPortManager)(nil), (*NnfPortManager)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfPortManager_To_v1alpha2_NnfPortManager(a.(*v1alpha4.NnfPortManager), b.(*NnfPortManager), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfPortManagerAllocationSpec)(nil), (*v1alpha3.NnfPortManagerAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfPortManagerAllocationSpec_To_v1alpha3_NnfPortManagerAllocationSpec(a.(*NnfPortManagerAllocationSpec), b.(*v1alpha3.NnfPortManagerAllocationSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfPortManagerAllocationSpec)(nil), (*v1alpha4.NnfPortManagerAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfPortManagerAllocationSpec_To_v1alpha4_NnfPortManagerAllocationSpec(a.(*NnfPortManagerAllocationSpec), b.(*v1alpha4.NnfPortManagerAllocationSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfPortManagerAllocationSpec)(nil), (*NnfPortManagerAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfPortManagerAllocationSpec_To_v1alpha2_NnfPortManagerAllocationSpec(a.(*v1alpha3.NnfPortManagerAllocationSpec), b.(*NnfPortManagerAllocationSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfPortManagerAllocationSpec)(nil), (*NnfPortManagerAllocationSpec)(nil), func(a, b 
interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfPortManagerAllocationSpec_To_v1alpha2_NnfPortManagerAllocationSpec(a.(*v1alpha4.NnfPortManagerAllocationSpec), b.(*NnfPortManagerAllocationSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfPortManagerAllocationStatus)(nil), (*v1alpha3.NnfPortManagerAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfPortManagerAllocationStatus_To_v1alpha3_NnfPortManagerAllocationStatus(a.(*NnfPortManagerAllocationStatus), b.(*v1alpha3.NnfPortManagerAllocationStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfPortManagerAllocationStatus)(nil), (*v1alpha4.NnfPortManagerAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfPortManagerAllocationStatus_To_v1alpha4_NnfPortManagerAllocationStatus(a.(*NnfPortManagerAllocationStatus), b.(*v1alpha4.NnfPortManagerAllocationStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfPortManagerAllocationStatus)(nil), (*NnfPortManagerAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfPortManagerAllocationStatus_To_v1alpha2_NnfPortManagerAllocationStatus(a.(*v1alpha3.NnfPortManagerAllocationStatus), b.(*NnfPortManagerAllocationStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfPortManagerAllocationStatus)(nil), (*NnfPortManagerAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfPortManagerAllocationStatus_To_v1alpha2_NnfPortManagerAllocationStatus(a.(*v1alpha4.NnfPortManagerAllocationStatus), b.(*NnfPortManagerAllocationStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfPortManagerList)(nil), (*v1alpha3.NnfPortManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha2_NnfPortManagerList_To_v1alpha3_NnfPortManagerList(a.(*NnfPortManagerList), b.(*v1alpha3.NnfPortManagerList), scope) + if err := s.AddGeneratedConversionFunc((*NnfPortManagerList)(nil), (*v1alpha4.NnfPortManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfPortManagerList_To_v1alpha4_NnfPortManagerList(a.(*NnfPortManagerList), b.(*v1alpha4.NnfPortManagerList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfPortManagerList)(nil), (*NnfPortManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfPortManagerList_To_v1alpha2_NnfPortManagerList(a.(*v1alpha3.NnfPortManagerList), b.(*NnfPortManagerList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfPortManagerList)(nil), (*NnfPortManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfPortManagerList_To_v1alpha2_NnfPortManagerList(a.(*v1alpha4.NnfPortManagerList), b.(*NnfPortManagerList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfPortManagerSpec)(nil), (*v1alpha3.NnfPortManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfPortManagerSpec_To_v1alpha3_NnfPortManagerSpec(a.(*NnfPortManagerSpec), b.(*v1alpha3.NnfPortManagerSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfPortManagerSpec)(nil), (*v1alpha4.NnfPortManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfPortManagerSpec_To_v1alpha4_NnfPortManagerSpec(a.(*NnfPortManagerSpec), b.(*v1alpha4.NnfPortManagerSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfPortManagerSpec)(nil), (*NnfPortManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha3_NnfPortManagerSpec_To_v1alpha2_NnfPortManagerSpec(a.(*v1alpha3.NnfPortManagerSpec), b.(*NnfPortManagerSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfPortManagerSpec)(nil), (*NnfPortManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfPortManagerSpec_To_v1alpha2_NnfPortManagerSpec(a.(*v1alpha4.NnfPortManagerSpec), b.(*NnfPortManagerSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfPortManagerStatus)(nil), (*v1alpha3.NnfPortManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfPortManagerStatus_To_v1alpha3_NnfPortManagerStatus(a.(*NnfPortManagerStatus), b.(*v1alpha3.NnfPortManagerStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfPortManagerStatus)(nil), (*v1alpha4.NnfPortManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfPortManagerStatus_To_v1alpha4_NnfPortManagerStatus(a.(*NnfPortManagerStatus), b.(*v1alpha4.NnfPortManagerStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfPortManagerStatus)(nil), (*NnfPortManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfPortManagerStatus_To_v1alpha2_NnfPortManagerStatus(a.(*v1alpha3.NnfPortManagerStatus), b.(*NnfPortManagerStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfPortManagerStatus)(nil), (*NnfPortManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfPortManagerStatus_To_v1alpha2_NnfPortManagerStatus(a.(*v1alpha4.NnfPortManagerStatus), b.(*NnfPortManagerStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfResourceStatus)(nil), (*v1alpha3.NnfResourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha2_NnfResourceStatus_To_v1alpha3_NnfResourceStatus(a.(*NnfResourceStatus), b.(*v1alpha3.NnfResourceStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfResourceStatus)(nil), (*v1alpha4.NnfResourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfResourceStatus_To_v1alpha4_NnfResourceStatus(a.(*NnfResourceStatus), b.(*v1alpha4.NnfResourceStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfResourceStatus)(nil), (*NnfResourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfResourceStatus_To_v1alpha2_NnfResourceStatus(a.(*v1alpha3.NnfResourceStatus), b.(*NnfResourceStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfResourceStatus)(nil), (*NnfResourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfResourceStatus_To_v1alpha2_NnfResourceStatus(a.(*v1alpha4.NnfResourceStatus), b.(*NnfResourceStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfServerStatus)(nil), (*v1alpha3.NnfServerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfServerStatus_To_v1alpha3_NnfServerStatus(a.(*NnfServerStatus), b.(*v1alpha3.NnfServerStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfServerStatus)(nil), (*v1alpha4.NnfServerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfServerStatus_To_v1alpha4_NnfServerStatus(a.(*NnfServerStatus), b.(*v1alpha4.NnfServerStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfServerStatus)(nil), (*NnfServerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfServerStatus_To_v1alpha2_NnfServerStatus(a.(*v1alpha3.NnfServerStatus), b.(*NnfServerStatus), scope) + if err := 
s.AddGeneratedConversionFunc((*v1alpha4.NnfServerStatus)(nil), (*NnfServerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfServerStatus_To_v1alpha2_NnfServerStatus(a.(*v1alpha4.NnfServerStatus), b.(*NnfServerStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorage)(nil), (*v1alpha3.NnfStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfStorage_To_v1alpha3_NnfStorage(a.(*NnfStorage), b.(*v1alpha3.NnfStorage), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorage)(nil), (*v1alpha4.NnfStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfStorage_To_v1alpha4_NnfStorage(a.(*NnfStorage), b.(*v1alpha4.NnfStorage), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorage)(nil), (*NnfStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorage_To_v1alpha2_NnfStorage(a.(*v1alpha3.NnfStorage), b.(*NnfStorage), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorage)(nil), (*NnfStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorage_To_v1alpha2_NnfStorage(a.(*v1alpha4.NnfStorage), b.(*NnfStorage), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageAllocationNodes)(nil), (*v1alpha3.NnfStorageAllocationNodes)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfStorageAllocationNodes_To_v1alpha3_NnfStorageAllocationNodes(a.(*NnfStorageAllocationNodes), b.(*v1alpha3.NnfStorageAllocationNodes), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageAllocationNodes)(nil), (*v1alpha4.NnfStorageAllocationNodes)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha2_NnfStorageAllocationNodes_To_v1alpha4_NnfStorageAllocationNodes(a.(*NnfStorageAllocationNodes), b.(*v1alpha4.NnfStorageAllocationNodes), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageAllocationNodes)(nil), (*NnfStorageAllocationNodes)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageAllocationNodes_To_v1alpha2_NnfStorageAllocationNodes(a.(*v1alpha3.NnfStorageAllocationNodes), b.(*NnfStorageAllocationNodes), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageAllocationNodes)(nil), (*NnfStorageAllocationNodes)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageAllocationNodes_To_v1alpha2_NnfStorageAllocationNodes(a.(*v1alpha4.NnfStorageAllocationNodes), b.(*NnfStorageAllocationNodes), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageAllocationSetSpec)(nil), (*v1alpha3.NnfStorageAllocationSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfStorageAllocationSetSpec_To_v1alpha3_NnfStorageAllocationSetSpec(a.(*NnfStorageAllocationSetSpec), b.(*v1alpha3.NnfStorageAllocationSetSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageAllocationSetSpec)(nil), (*v1alpha4.NnfStorageAllocationSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfStorageAllocationSetSpec_To_v1alpha4_NnfStorageAllocationSetSpec(a.(*NnfStorageAllocationSetSpec), b.(*v1alpha4.NnfStorageAllocationSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageAllocationSetSpec)(nil), (*NnfStorageAllocationSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageAllocationSetSpec_To_v1alpha2_NnfStorageAllocationSetSpec(a.(*v1alpha3.NnfStorageAllocationSetSpec), 
b.(*NnfStorageAllocationSetSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageAllocationSetSpec)(nil), (*NnfStorageAllocationSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageAllocationSetSpec_To_v1alpha2_NnfStorageAllocationSetSpec(a.(*v1alpha4.NnfStorageAllocationSetSpec), b.(*NnfStorageAllocationSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageAllocationSetStatus)(nil), (*v1alpha3.NnfStorageAllocationSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfStorageAllocationSetStatus_To_v1alpha3_NnfStorageAllocationSetStatus(a.(*NnfStorageAllocationSetStatus), b.(*v1alpha3.NnfStorageAllocationSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageAllocationSetStatus)(nil), (*v1alpha4.NnfStorageAllocationSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfStorageAllocationSetStatus_To_v1alpha4_NnfStorageAllocationSetStatus(a.(*NnfStorageAllocationSetStatus), b.(*v1alpha4.NnfStorageAllocationSetStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageAllocationSetStatus)(nil), (*NnfStorageAllocationSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageAllocationSetStatus_To_v1alpha2_NnfStorageAllocationSetStatus(a.(*v1alpha3.NnfStorageAllocationSetStatus), b.(*NnfStorageAllocationSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageAllocationSetStatus)(nil), (*NnfStorageAllocationSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageAllocationSetStatus_To_v1alpha2_NnfStorageAllocationSetStatus(a.(*v1alpha4.NnfStorageAllocationSetStatus), b.(*NnfStorageAllocationSetStatus), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*NnfStorageList)(nil), (*v1alpha3.NnfStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfStorageList_To_v1alpha3_NnfStorageList(a.(*NnfStorageList), b.(*v1alpha3.NnfStorageList), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageList)(nil), (*v1alpha4.NnfStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfStorageList_To_v1alpha4_NnfStorageList(a.(*NnfStorageList), b.(*v1alpha4.NnfStorageList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageList)(nil), (*NnfStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageList_To_v1alpha2_NnfStorageList(a.(*v1alpha3.NnfStorageList), b.(*NnfStorageList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageList)(nil), (*NnfStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageList_To_v1alpha2_NnfStorageList(a.(*v1alpha4.NnfStorageList), b.(*NnfStorageList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageLustreSpec)(nil), (*v1alpha3.NnfStorageLustreSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfStorageLustreSpec_To_v1alpha3_NnfStorageLustreSpec(a.(*NnfStorageLustreSpec), b.(*v1alpha3.NnfStorageLustreSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageLustreSpec)(nil), (*v1alpha4.NnfStorageLustreSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfStorageLustreSpec_To_v1alpha4_NnfStorageLustreSpec(a.(*NnfStorageLustreSpec), b.(*v1alpha4.NnfStorageLustreSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageLustreSpec)(nil), (*NnfStorageLustreSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha3_NnfStorageLustreSpec_To_v1alpha2_NnfStorageLustreSpec(a.(*v1alpha3.NnfStorageLustreSpec), b.(*NnfStorageLustreSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageLustreSpec)(nil), (*NnfStorageLustreSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageLustreSpec_To_v1alpha2_NnfStorageLustreSpec(a.(*v1alpha4.NnfStorageLustreSpec), b.(*NnfStorageLustreSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageLustreStatus)(nil), (*v1alpha3.NnfStorageLustreStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreStatus(a.(*NnfStorageLustreStatus), b.(*v1alpha3.NnfStorageLustreStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageLustreStatus)(nil), (*v1alpha4.NnfStorageLustreStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfStorageLustreStatus_To_v1alpha4_NnfStorageLustreStatus(a.(*NnfStorageLustreStatus), b.(*v1alpha4.NnfStorageLustreStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageLustreStatus)(nil), (*NnfStorageLustreStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageLustreStatus_To_v1alpha2_NnfStorageLustreStatus(a.(*v1alpha3.NnfStorageLustreStatus), b.(*NnfStorageLustreStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageLustreStatus)(nil), (*NnfStorageLustreStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha2_NnfStorageLustreStatus(a.(*v1alpha4.NnfStorageLustreStatus), b.(*NnfStorageLustreStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfile)(nil), (*v1alpha3.NnfStorageProfile)(nil), func(a, b interface{}, scope 
conversion.Scope) error { - return Convert_v1alpha2_NnfStorageProfile_To_v1alpha3_NnfStorageProfile(a.(*NnfStorageProfile), b.(*v1alpha3.NnfStorageProfile), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageProfile)(nil), (*v1alpha4.NnfStorageProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(a.(*NnfStorageProfile), b.(*v1alpha4.NnfStorageProfile), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageProfile)(nil), (*NnfStorageProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageProfile_To_v1alpha2_NnfStorageProfile(a.(*v1alpha3.NnfStorageProfile), b.(*NnfStorageProfile), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfile)(nil), (*NnfStorageProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfile_To_v1alpha2_NnfStorageProfile(a.(*v1alpha4.NnfStorageProfile), b.(*NnfStorageProfile), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileCmdLines)(nil), (*v1alpha3.NnfStorageProfileCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(a.(*NnfStorageProfileCmdLines), b.(*v1alpha3.NnfStorageProfileCmdLines), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileCmdLines)(nil), (*v1alpha4.NnfStorageProfileCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(a.(*NnfStorageProfileCmdLines), b.(*v1alpha4.NnfStorageProfileCmdLines), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileData)(nil), (*v1alpha3.NnfStorageProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error 
{ - return Convert_v1alpha2_NnfStorageProfileData_To_v1alpha3_NnfStorageProfileData(a.(*NnfStorageProfileData), b.(*v1alpha3.NnfStorageProfileData), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileData)(nil), (*v1alpha4.NnfStorageProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData(a.(*NnfStorageProfileData), b.(*v1alpha4.NnfStorageProfileData), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageProfileData)(nil), (*NnfStorageProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageProfileData_To_v1alpha2_NnfStorageProfileData(a.(*v1alpha3.NnfStorageProfileData), b.(*NnfStorageProfileData), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileData)(nil), (*NnfStorageProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileData_To_v1alpha2_NnfStorageProfileData(a.(*v1alpha4.NnfStorageProfileData), b.(*NnfStorageProfileData), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileGFS2Data)(nil), (*v1alpha3.NnfStorageProfileGFS2Data)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfStorageProfileGFS2Data_To_v1alpha3_NnfStorageProfileGFS2Data(a.(*NnfStorageProfileGFS2Data), b.(*v1alpha3.NnfStorageProfileGFS2Data), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileGFS2Data)(nil), (*v1alpha4.NnfStorageProfileGFS2Data)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfStorageProfileGFS2Data_To_v1alpha4_NnfStorageProfileGFS2Data(a.(*NnfStorageProfileGFS2Data), b.(*v1alpha4.NnfStorageProfileGFS2Data), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageProfileGFS2Data)(nil), 
(*NnfStorageProfileGFS2Data)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageProfileGFS2Data_To_v1alpha2_NnfStorageProfileGFS2Data(a.(*v1alpha3.NnfStorageProfileGFS2Data), b.(*NnfStorageProfileGFS2Data), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileGFS2Data)(nil), (*NnfStorageProfileGFS2Data)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileGFS2Data_To_v1alpha2_NnfStorageProfileGFS2Data(a.(*v1alpha4.NnfStorageProfileGFS2Data), b.(*NnfStorageProfileGFS2Data), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLVMLvChangeCmdLines)(nil), (*v1alpha3.NnfStorageProfileLVMLvChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines(a.(*NnfStorageProfileLVMLvChangeCmdLines), b.(*v1alpha3.NnfStorageProfileLVMLvChangeCmdLines), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLVMLvChangeCmdLines)(nil), (*v1alpha4.NnfStorageProfileLVMLvChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines(a.(*NnfStorageProfileLVMLvChangeCmdLines), b.(*v1alpha4.NnfStorageProfileLVMLvChangeCmdLines), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageProfileLVMLvChangeCmdLines)(nil), (*NnfStorageProfileLVMLvChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines(a.(*v1alpha3.NnfStorageProfileLVMLvChangeCmdLines), b.(*NnfStorageProfileLVMLvChangeCmdLines), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileLVMLvChangeCmdLines)(nil), 
(*NnfStorageProfileLVMLvChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines(a.(*v1alpha4.NnfStorageProfileLVMLvChangeCmdLines), b.(*NnfStorageProfileLVMLvChangeCmdLines), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLVMVgChangeCmdLines)(nil), (*v1alpha3.NnfStorageProfileLVMVgChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines(a.(*NnfStorageProfileLVMVgChangeCmdLines), b.(*v1alpha3.NnfStorageProfileLVMVgChangeCmdLines), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLVMVgChangeCmdLines)(nil), (*v1alpha4.NnfStorageProfileLVMVgChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines(a.(*NnfStorageProfileLVMVgChangeCmdLines), b.(*v1alpha4.NnfStorageProfileLVMVgChangeCmdLines), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageProfileLVMVgChangeCmdLines)(nil), (*NnfStorageProfileLVMVgChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines(a.(*v1alpha3.NnfStorageProfileLVMVgChangeCmdLines), b.(*NnfStorageProfileLVMVgChangeCmdLines), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileLVMVgChangeCmdLines)(nil), (*NnfStorageProfileLVMVgChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines(a.(*v1alpha4.NnfStorageProfileLVMVgChangeCmdLines), 
b.(*NnfStorageProfileLVMVgChangeCmdLines), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileList)(nil), (*v1alpha3.NnfStorageProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfStorageProfileList_To_v1alpha3_NnfStorageProfileList(a.(*NnfStorageProfileList), b.(*v1alpha3.NnfStorageProfileList), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileList)(nil), (*v1alpha4.NnfStorageProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfStorageProfileList_To_v1alpha4_NnfStorageProfileList(a.(*NnfStorageProfileList), b.(*v1alpha4.NnfStorageProfileList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageProfileList)(nil), (*NnfStorageProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageProfileList_To_v1alpha2_NnfStorageProfileList(a.(*v1alpha3.NnfStorageProfileList), b.(*NnfStorageProfileList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileList)(nil), (*NnfStorageProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileList_To_v1alpha2_NnfStorageProfileList(a.(*v1alpha4.NnfStorageProfileList), b.(*NnfStorageProfileList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLustreCmdLines)(nil), (*v1alpha3.NnfStorageProfileLustreCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(a.(*NnfStorageProfileLustreCmdLines), b.(*v1alpha3.NnfStorageProfileLustreCmdLines), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLustreCmdLines)(nil), (*v1alpha4.NnfStorageProfileLustreCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + 
return Convert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(a.(*NnfStorageProfileLustreCmdLines), b.(*v1alpha4.NnfStorageProfileLustreCmdLines), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLustreData)(nil), (*v1alpha3.NnfStorageProfileLustreData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfStorageProfileLustreData_To_v1alpha3_NnfStorageProfileLustreData(a.(*NnfStorageProfileLustreData), b.(*v1alpha3.NnfStorageProfileLustreData), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLustreData)(nil), (*v1alpha4.NnfStorageProfileLustreData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfStorageProfileLustreData_To_v1alpha4_NnfStorageProfileLustreData(a.(*NnfStorageProfileLustreData), b.(*v1alpha4.NnfStorageProfileLustreData), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageProfileLustreData)(nil), (*NnfStorageProfileLustreData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageProfileLustreData_To_v1alpha2_NnfStorageProfileLustreData(a.(*v1alpha3.NnfStorageProfileLustreData), b.(*NnfStorageProfileLustreData), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileLustreData)(nil), (*NnfStorageProfileLustreData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileLustreData_To_v1alpha2_NnfStorageProfileLustreData(a.(*v1alpha4.NnfStorageProfileLustreData), b.(*NnfStorageProfileLustreData), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLustreMiscOptions)(nil), (*v1alpha3.NnfStorageProfileLustreMiscOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions(a.(*NnfStorageProfileLustreMiscOptions), b.(*v1alpha3.NnfStorageProfileLustreMiscOptions), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLustreMiscOptions)(nil), (*v1alpha4.NnfStorageProfileLustreMiscOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(a.(*NnfStorageProfileLustreMiscOptions), b.(*v1alpha4.NnfStorageProfileLustreMiscOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageProfileLustreMiscOptions)(nil), (*NnfStorageProfileLustreMiscOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions(a.(*v1alpha3.NnfStorageProfileLustreMiscOptions), b.(*NnfStorageProfileLustreMiscOptions), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileLustreMiscOptions)(nil), (*NnfStorageProfileLustreMiscOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions(a.(*v1alpha4.NnfStorageProfileLustreMiscOptions), b.(*NnfStorageProfileLustreMiscOptions), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileRawData)(nil), (*v1alpha3.NnfStorageProfileRawData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfStorageProfileRawData_To_v1alpha3_NnfStorageProfileRawData(a.(*NnfStorageProfileRawData), b.(*v1alpha3.NnfStorageProfileRawData), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileRawData)(nil), (*v1alpha4.NnfStorageProfileRawData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha2_NnfStorageProfileRawData_To_v1alpha4_NnfStorageProfileRawData(a.(*NnfStorageProfileRawData), b.(*v1alpha4.NnfStorageProfileRawData), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageProfileRawData)(nil), (*NnfStorageProfileRawData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageProfileRawData_To_v1alpha2_NnfStorageProfileRawData(a.(*v1alpha3.NnfStorageProfileRawData), b.(*NnfStorageProfileRawData), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileRawData)(nil), (*NnfStorageProfileRawData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileRawData_To_v1alpha2_NnfStorageProfileRawData(a.(*v1alpha4.NnfStorageProfileRawData), b.(*NnfStorageProfileRawData), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileXFSData)(nil), (*v1alpha3.NnfStorageProfileXFSData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfStorageProfileXFSData_To_v1alpha3_NnfStorageProfileXFSData(a.(*NnfStorageProfileXFSData), b.(*v1alpha3.NnfStorageProfileXFSData), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileXFSData)(nil), (*v1alpha4.NnfStorageProfileXFSData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfStorageProfileXFSData_To_v1alpha4_NnfStorageProfileXFSData(a.(*NnfStorageProfileXFSData), b.(*v1alpha4.NnfStorageProfileXFSData), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageProfileXFSData)(nil), (*NnfStorageProfileXFSData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageProfileXFSData_To_v1alpha2_NnfStorageProfileXFSData(a.(*v1alpha3.NnfStorageProfileXFSData), b.(*NnfStorageProfileXFSData), scope) + if err := 
s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileXFSData)(nil), (*NnfStorageProfileXFSData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileXFSData_To_v1alpha2_NnfStorageProfileXFSData(a.(*v1alpha4.NnfStorageProfileXFSData), b.(*NnfStorageProfileXFSData), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageSpec)(nil), (*v1alpha3.NnfStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfStorageSpec_To_v1alpha3_NnfStorageSpec(a.(*NnfStorageSpec), b.(*v1alpha3.NnfStorageSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageSpec)(nil), (*v1alpha4.NnfStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfStorageSpec_To_v1alpha4_NnfStorageSpec(a.(*NnfStorageSpec), b.(*v1alpha4.NnfStorageSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageSpec)(nil), (*NnfStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageSpec_To_v1alpha2_NnfStorageSpec(a.(*v1alpha3.NnfStorageSpec), b.(*NnfStorageSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageSpec)(nil), (*NnfStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageSpec_To_v1alpha2_NnfStorageSpec(a.(*v1alpha4.NnfStorageSpec), b.(*NnfStorageSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageStatus)(nil), (*v1alpha3.NnfStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfStorageStatus_To_v1alpha3_NnfStorageStatus(a.(*NnfStorageStatus), b.(*v1alpha3.NnfStorageStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfStorageStatus)(nil), (*v1alpha4.NnfStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha2_NnfStorageStatus_To_v1alpha4_NnfStorageStatus(a.(*NnfStorageStatus), b.(*v1alpha4.NnfStorageStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfStorageStatus)(nil), (*NnfStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageStatus_To_v1alpha2_NnfStorageStatus(a.(*v1alpha3.NnfStorageStatus), b.(*NnfStorageStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageStatus)(nil), (*NnfStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageStatus_To_v1alpha2_NnfStorageStatus(a.(*v1alpha4.NnfStorageStatus), b.(*NnfStorageStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfSystemStorage)(nil), (*v1alpha3.NnfSystemStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfSystemStorage_To_v1alpha3_NnfSystemStorage(a.(*NnfSystemStorage), b.(*v1alpha3.NnfSystemStorage), scope) + if err := s.AddGeneratedConversionFunc((*NnfSystemStorage)(nil), (*v1alpha4.NnfSystemStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfSystemStorage_To_v1alpha4_NnfSystemStorage(a.(*NnfSystemStorage), b.(*v1alpha4.NnfSystemStorage), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfSystemStorage)(nil), (*NnfSystemStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfSystemStorage_To_v1alpha2_NnfSystemStorage(a.(*v1alpha3.NnfSystemStorage), b.(*NnfSystemStorage), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfSystemStorage)(nil), (*NnfSystemStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfSystemStorage_To_v1alpha2_NnfSystemStorage(a.(*v1alpha4.NnfSystemStorage), b.(*NnfSystemStorage), scope) }); err != nil { return err } - if err 
:= s.AddGeneratedConversionFunc((*NnfSystemStorageList)(nil), (*v1alpha3.NnfSystemStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfSystemStorageList_To_v1alpha3_NnfSystemStorageList(a.(*NnfSystemStorageList), b.(*v1alpha3.NnfSystemStorageList), scope) + if err := s.AddGeneratedConversionFunc((*NnfSystemStorageList)(nil), (*v1alpha4.NnfSystemStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfSystemStorageList_To_v1alpha4_NnfSystemStorageList(a.(*NnfSystemStorageList), b.(*v1alpha4.NnfSystemStorageList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfSystemStorageList)(nil), (*NnfSystemStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfSystemStorageList_To_v1alpha2_NnfSystemStorageList(a.(*v1alpha3.NnfSystemStorageList), b.(*NnfSystemStorageList), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfSystemStorageList)(nil), (*NnfSystemStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfSystemStorageList_To_v1alpha2_NnfSystemStorageList(a.(*v1alpha4.NnfSystemStorageList), b.(*NnfSystemStorageList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfSystemStorageSpec)(nil), (*v1alpha3.NnfSystemStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec(a.(*NnfSystemStorageSpec), b.(*v1alpha3.NnfSystemStorageSpec), scope) + if err := s.AddGeneratedConversionFunc((*NnfSystemStorageSpec)(nil), (*v1alpha4.NnfSystemStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfSystemStorageSpec_To_v1alpha4_NnfSystemStorageSpec(a.(*NnfSystemStorageSpec), b.(*v1alpha4.NnfSystemStorageSpec), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*v1alpha3.NnfSystemStorageSpec)(nil), (*NnfSystemStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec(a.(*v1alpha3.NnfSystemStorageSpec), b.(*NnfSystemStorageSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfSystemStorageSpec)(nil), (*NnfSystemStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec(a.(*v1alpha4.NnfSystemStorageSpec), b.(*NnfSystemStorageSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfSystemStorageStatus)(nil), (*v1alpha3.NnfSystemStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NnfSystemStorageStatus_To_v1alpha3_NnfSystemStorageStatus(a.(*NnfSystemStorageStatus), b.(*v1alpha3.NnfSystemStorageStatus), scope) + if err := s.AddGeneratedConversionFunc((*NnfSystemStorageStatus)(nil), (*v1alpha4.NnfSystemStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus(a.(*NnfSystemStorageStatus), b.(*v1alpha4.NnfSystemStorageStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha3.NnfSystemStorageStatus)(nil), (*NnfSystemStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfSystemStorageStatus_To_v1alpha2_NnfSystemStorageStatus(a.(*v1alpha3.NnfSystemStorageStatus), b.(*NnfSystemStorageStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfSystemStorageStatus)(nil), (*NnfSystemStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfSystemStorageStatus_To_v1alpha2_NnfSystemStorageStatus(a.(*v1alpha4.NnfSystemStorageStatus), b.(*NnfSystemStorageStatus), scope) }); err != 
nil { return err } - if err := s.AddConversionFunc((*v1alpha3.NnfStorageProfileCmdLines)(nil), (*NnfStorageProfileCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfileCmdLines(a.(*v1alpha3.NnfStorageProfileCmdLines), b.(*NnfStorageProfileCmdLines), scope) + if err := s.AddConversionFunc((*v1alpha4.NnfStorageProfileCmdLines)(nil), (*NnfStorageProfileCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfileCmdLines(a.(*v1alpha4.NnfStorageProfileCmdLines), b.(*NnfStorageProfileCmdLines), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1alpha3.NnfStorageProfileLustreCmdLines)(nil), (*NnfStorageProfileLustreCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines(a.(*v1alpha3.NnfStorageProfileLustreCmdLines), b.(*NnfStorageProfileLustreCmdLines), scope) + if err := s.AddConversionFunc((*v1alpha4.NnfStorageProfileLustreCmdLines)(nil), (*NnfStorageProfileLustreCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines(a.(*v1alpha4.NnfStorageProfileLustreCmdLines), b.(*NnfStorageProfileLustreCmdLines), scope) }); err != nil { return err } return nil } -func autoConvert_v1alpha2_LustreStorageSpec_To_v1alpha3_LustreStorageSpec(in *LustreStorageSpec, out *v1alpha3.LustreStorageSpec, s conversion.Scope) error { +func autoConvert_v1alpha2_LustreStorageSpec_To_v1alpha4_LustreStorageSpec(in *LustreStorageSpec, out *v1alpha4.LustreStorageSpec, s conversion.Scope) error { out.FileSystemName = in.FileSystemName out.TargetType = in.TargetType out.StartIndex = in.StartIndex @@ -885,12 +885,12 @@ func 
autoConvert_v1alpha2_LustreStorageSpec_To_v1alpha3_LustreStorageSpec(in *Lu return nil } -// Convert_v1alpha2_LustreStorageSpec_To_v1alpha3_LustreStorageSpec is an autogenerated conversion function. -func Convert_v1alpha2_LustreStorageSpec_To_v1alpha3_LustreStorageSpec(in *LustreStorageSpec, out *v1alpha3.LustreStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha2_LustreStorageSpec_To_v1alpha3_LustreStorageSpec(in, out, s) +// Convert_v1alpha2_LustreStorageSpec_To_v1alpha4_LustreStorageSpec is an autogenerated conversion function. +func Convert_v1alpha2_LustreStorageSpec_To_v1alpha4_LustreStorageSpec(in *LustreStorageSpec, out *v1alpha4.LustreStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_LustreStorageSpec_To_v1alpha4_LustreStorageSpec(in, out, s) } -func autoConvert_v1alpha3_LustreStorageSpec_To_v1alpha2_LustreStorageSpec(in *v1alpha3.LustreStorageSpec, out *LustreStorageSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_LustreStorageSpec_To_v1alpha2_LustreStorageSpec(in *v1alpha4.LustreStorageSpec, out *LustreStorageSpec, s conversion.Scope) error { out.FileSystemName = in.FileSystemName out.TargetType = in.TargetType out.StartIndex = in.StartIndex @@ -899,66 +899,66 @@ func autoConvert_v1alpha3_LustreStorageSpec_To_v1alpha2_LustreStorageSpec(in *v1 return nil } -// Convert_v1alpha3_LustreStorageSpec_To_v1alpha2_LustreStorageSpec is an autogenerated conversion function. -func Convert_v1alpha3_LustreStorageSpec_To_v1alpha2_LustreStorageSpec(in *v1alpha3.LustreStorageSpec, out *LustreStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_LustreStorageSpec_To_v1alpha2_LustreStorageSpec(in, out, s) +// Convert_v1alpha4_LustreStorageSpec_To_v1alpha2_LustreStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_LustreStorageSpec_To_v1alpha2_LustreStorageSpec(in *v1alpha4.LustreStorageSpec, out *LustreStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_LustreStorageSpec_To_v1alpha2_LustreStorageSpec(in, out, s) } -func autoConvert_v1alpha2_NnfAccess_To_v1alpha3_NnfAccess(in *NnfAccess, out *v1alpha3.NnfAccess, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfAccess_To_v1alpha4_NnfAccess(in *NnfAccess, out *v1alpha4.NnfAccess, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha2_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha2_NnfAccessSpec_To_v1alpha4_NnfAccessSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha2_NnfAccessStatus_To_v1alpha3_NnfAccessStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha2_NnfAccessStatus_To_v1alpha4_NnfAccessStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha2_NnfAccess_To_v1alpha3_NnfAccess is an autogenerated conversion function. -func Convert_v1alpha2_NnfAccess_To_v1alpha3_NnfAccess(in *NnfAccess, out *v1alpha3.NnfAccess, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfAccess_To_v1alpha3_NnfAccess(in, out, s) +// Convert_v1alpha2_NnfAccess_To_v1alpha4_NnfAccess is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfAccess_To_v1alpha4_NnfAccess(in *NnfAccess, out *v1alpha4.NnfAccess, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfAccess_To_v1alpha4_NnfAccess(in, out, s) } -func autoConvert_v1alpha3_NnfAccess_To_v1alpha2_NnfAccess(in *v1alpha3.NnfAccess, out *NnfAccess, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfAccess_To_v1alpha2_NnfAccess(in *v1alpha4.NnfAccess, out *NnfAccess, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha4_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha3_NnfAccessStatus_To_v1alpha2_NnfAccessStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha4_NnfAccessStatus_To_v1alpha2_NnfAccessStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfAccess_To_v1alpha2_NnfAccess is an autogenerated conversion function. -func Convert_v1alpha3_NnfAccess_To_v1alpha2_NnfAccess(in *v1alpha3.NnfAccess, out *NnfAccess, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfAccess_To_v1alpha2_NnfAccess(in, out, s) +// Convert_v1alpha4_NnfAccess_To_v1alpha2_NnfAccess is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfAccess_To_v1alpha2_NnfAccess(in *v1alpha4.NnfAccess, out *NnfAccess, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfAccess_To_v1alpha2_NnfAccess(in, out, s) } -func autoConvert_v1alpha2_NnfAccessList_To_v1alpha3_NnfAccessList(in *NnfAccessList, out *v1alpha3.NnfAccessList, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfAccessList_To_v1alpha4_NnfAccessList(in *NnfAccessList, out *v1alpha4.NnfAccessList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha3.NnfAccess)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]v1alpha4.NnfAccess)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha2_NnfAccessList_To_v1alpha3_NnfAccessList is an autogenerated conversion function. -func Convert_v1alpha2_NnfAccessList_To_v1alpha3_NnfAccessList(in *NnfAccessList, out *v1alpha3.NnfAccessList, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfAccessList_To_v1alpha3_NnfAccessList(in, out, s) +// Convert_v1alpha2_NnfAccessList_To_v1alpha4_NnfAccessList is an autogenerated conversion function. +func Convert_v1alpha2_NnfAccessList_To_v1alpha4_NnfAccessList(in *NnfAccessList, out *v1alpha4.NnfAccessList, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfAccessList_To_v1alpha4_NnfAccessList(in, out, s) } -func autoConvert_v1alpha3_NnfAccessList_To_v1alpha2_NnfAccessList(in *v1alpha3.NnfAccessList, out *NnfAccessList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfAccessList_To_v1alpha2_NnfAccessList(in *v1alpha4.NnfAccessList, out *NnfAccessList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]NnfAccess)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha3_NnfAccessList_To_v1alpha2_NnfAccessList is an autogenerated conversion function. 
-func Convert_v1alpha3_NnfAccessList_To_v1alpha2_NnfAccessList(in *v1alpha3.NnfAccessList, out *NnfAccessList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfAccessList_To_v1alpha2_NnfAccessList(in, out, s) +// Convert_v1alpha4_NnfAccessList_To_v1alpha2_NnfAccessList is an autogenerated conversion function. +func Convert_v1alpha4_NnfAccessList_To_v1alpha2_NnfAccessList(in *v1alpha4.NnfAccessList, out *NnfAccessList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfAccessList_To_v1alpha2_NnfAccessList(in, out, s) } -func autoConvert_v1alpha2_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(in *NnfAccessSpec, out *v1alpha3.NnfAccessSpec, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfAccessSpec_To_v1alpha4_NnfAccessSpec(in *NnfAccessSpec, out *v1alpha4.NnfAccessSpec, s conversion.Scope) error { out.DesiredState = in.DesiredState out.TeardownState = apiv1alpha2.WorkflowState(in.TeardownState) out.Target = in.Target @@ -972,12 +972,12 @@ func autoConvert_v1alpha2_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(in *NnfAccessS return nil } -// Convert_v1alpha2_NnfAccessSpec_To_v1alpha3_NnfAccessSpec is an autogenerated conversion function. -func Convert_v1alpha2_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(in *NnfAccessSpec, out *v1alpha3.NnfAccessSpec, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(in, out, s) +// Convert_v1alpha2_NnfAccessSpec_To_v1alpha4_NnfAccessSpec is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfAccessSpec_To_v1alpha4_NnfAccessSpec(in *NnfAccessSpec, out *v1alpha4.NnfAccessSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfAccessSpec_To_v1alpha4_NnfAccessSpec(in, out, s) } -func autoConvert_v1alpha3_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(in *v1alpha3.NnfAccessSpec, out *NnfAccessSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(in *v1alpha4.NnfAccessSpec, out *NnfAccessSpec, s conversion.Scope) error { out.DesiredState = in.DesiredState out.TeardownState = apiv1alpha2.WorkflowState(in.TeardownState) out.Target = in.Target @@ -991,64 +991,64 @@ func autoConvert_v1alpha3_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(in *v1alpha3.N return nil } -// Convert_v1alpha3_NnfAccessSpec_To_v1alpha2_NnfAccessSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(in *v1alpha3.NnfAccessSpec, out *NnfAccessSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(in, out, s) +// Convert_v1alpha4_NnfAccessSpec_To_v1alpha2_NnfAccessSpec is an autogenerated conversion function. +func Convert_v1alpha4_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(in *v1alpha4.NnfAccessSpec, out *NnfAccessSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(in, out, s) } -func autoConvert_v1alpha2_NnfAccessStatus_To_v1alpha3_NnfAccessStatus(in *NnfAccessStatus, out *v1alpha3.NnfAccessStatus, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfAccessStatus_To_v1alpha4_NnfAccessStatus(in *NnfAccessStatus, out *v1alpha4.NnfAccessStatus, s conversion.Scope) error { out.State = in.State out.Ready = in.Ready out.ResourceError = in.ResourceError return nil } -// Convert_v1alpha2_NnfAccessStatus_To_v1alpha3_NnfAccessStatus is an autogenerated conversion function. 
-func Convert_v1alpha2_NnfAccessStatus_To_v1alpha3_NnfAccessStatus(in *NnfAccessStatus, out *v1alpha3.NnfAccessStatus, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfAccessStatus_To_v1alpha3_NnfAccessStatus(in, out, s) +// Convert_v1alpha2_NnfAccessStatus_To_v1alpha4_NnfAccessStatus is an autogenerated conversion function. +func Convert_v1alpha2_NnfAccessStatus_To_v1alpha4_NnfAccessStatus(in *NnfAccessStatus, out *v1alpha4.NnfAccessStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfAccessStatus_To_v1alpha4_NnfAccessStatus(in, out, s) } -func autoConvert_v1alpha3_NnfAccessStatus_To_v1alpha2_NnfAccessStatus(in *v1alpha3.NnfAccessStatus, out *NnfAccessStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfAccessStatus_To_v1alpha2_NnfAccessStatus(in *v1alpha4.NnfAccessStatus, out *NnfAccessStatus, s conversion.Scope) error { out.State = in.State out.Ready = in.Ready out.ResourceError = in.ResourceError return nil } -// Convert_v1alpha3_NnfAccessStatus_To_v1alpha2_NnfAccessStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfAccessStatus_To_v1alpha2_NnfAccessStatus(in *v1alpha3.NnfAccessStatus, out *NnfAccessStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfAccessStatus_To_v1alpha2_NnfAccessStatus(in, out, s) +// Convert_v1alpha4_NnfAccessStatus_To_v1alpha2_NnfAccessStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfAccessStatus_To_v1alpha2_NnfAccessStatus(in *v1alpha4.NnfAccessStatus, out *NnfAccessStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfAccessStatus_To_v1alpha2_NnfAccessStatus(in, out, s) } -func autoConvert_v1alpha2_NnfContainerProfile_To_v1alpha3_NnfContainerProfile(in *NnfContainerProfile, out *v1alpha3.NnfContainerProfile, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfContainerProfile_To_v1alpha4_NnfContainerProfile(in *NnfContainerProfile, out *v1alpha4.NnfContainerProfile, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha2_NnfContainerProfileData_To_v1alpha3_NnfContainerProfileData(&in.Data, &out.Data, s); err != nil { + if err := Convert_v1alpha2_NnfContainerProfileData_To_v1alpha4_NnfContainerProfileData(&in.Data, &out.Data, s); err != nil { return err } return nil } -// Convert_v1alpha2_NnfContainerProfile_To_v1alpha3_NnfContainerProfile is an autogenerated conversion function. -func Convert_v1alpha2_NnfContainerProfile_To_v1alpha3_NnfContainerProfile(in *NnfContainerProfile, out *v1alpha3.NnfContainerProfile, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfContainerProfile_To_v1alpha3_NnfContainerProfile(in, out, s) +// Convert_v1alpha2_NnfContainerProfile_To_v1alpha4_NnfContainerProfile is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfContainerProfile_To_v1alpha4_NnfContainerProfile(in *NnfContainerProfile, out *v1alpha4.NnfContainerProfile, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfContainerProfile_To_v1alpha4_NnfContainerProfile(in, out, s) } -func autoConvert_v1alpha3_NnfContainerProfile_To_v1alpha2_NnfContainerProfile(in *v1alpha3.NnfContainerProfile, out *NnfContainerProfile, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfContainerProfile_To_v1alpha2_NnfContainerProfile(in *v1alpha4.NnfContainerProfile, out *NnfContainerProfile, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfContainerProfileData_To_v1alpha2_NnfContainerProfileData(&in.Data, &out.Data, s); err != nil { + if err := Convert_v1alpha4_NnfContainerProfileData_To_v1alpha2_NnfContainerProfileData(&in.Data, &out.Data, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfContainerProfile_To_v1alpha2_NnfContainerProfile is an autogenerated conversion function. -func Convert_v1alpha3_NnfContainerProfile_To_v1alpha2_NnfContainerProfile(in *v1alpha3.NnfContainerProfile, out *NnfContainerProfile, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfContainerProfile_To_v1alpha2_NnfContainerProfile(in, out, s) +// Convert_v1alpha4_NnfContainerProfile_To_v1alpha2_NnfContainerProfile is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfContainerProfile_To_v1alpha2_NnfContainerProfile(in *v1alpha4.NnfContainerProfile, out *NnfContainerProfile, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfContainerProfile_To_v1alpha2_NnfContainerProfile(in, out, s) } -func autoConvert_v1alpha2_NnfContainerProfileData_To_v1alpha3_NnfContainerProfileData(in *NnfContainerProfileData, out *v1alpha3.NnfContainerProfileData, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfContainerProfileData_To_v1alpha4_NnfContainerProfileData(in *NnfContainerProfileData, out *v1alpha4.NnfContainerProfileData, s conversion.Scope) error { out.Pinned = in.Pinned - out.Storages = *(*[]v1alpha3.NnfContainerProfileStorage)(unsafe.Pointer(&in.Storages)) + out.Storages = *(*[]v1alpha4.NnfContainerProfileStorage)(unsafe.Pointer(&in.Storages)) out.PreRunTimeoutSeconds = (*int64)(unsafe.Pointer(in.PreRunTimeoutSeconds)) out.PostRunTimeoutSeconds = (*int64)(unsafe.Pointer(in.PostRunTimeoutSeconds)) out.RetryLimit = in.RetryLimit @@ -1060,12 +1060,12 @@ func autoConvert_v1alpha2_NnfContainerProfileData_To_v1alpha3_NnfContainerProfil return nil } -// Convert_v1alpha2_NnfContainerProfileData_To_v1alpha3_NnfContainerProfileData is an autogenerated conversion function. -func Convert_v1alpha2_NnfContainerProfileData_To_v1alpha3_NnfContainerProfileData(in *NnfContainerProfileData, out *v1alpha3.NnfContainerProfileData, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfContainerProfileData_To_v1alpha3_NnfContainerProfileData(in, out, s) +// Convert_v1alpha2_NnfContainerProfileData_To_v1alpha4_NnfContainerProfileData is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfContainerProfileData_To_v1alpha4_NnfContainerProfileData(in *NnfContainerProfileData, out *v1alpha4.NnfContainerProfileData, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfContainerProfileData_To_v1alpha4_NnfContainerProfileData(in, out, s) } -func autoConvert_v1alpha3_NnfContainerProfileData_To_v1alpha2_NnfContainerProfileData(in *v1alpha3.NnfContainerProfileData, out *NnfContainerProfileData, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfContainerProfileData_To_v1alpha2_NnfContainerProfileData(in *v1alpha4.NnfContainerProfileData, out *NnfContainerProfileData, s conversion.Scope) error { out.Pinned = in.Pinned out.Storages = *(*[]NnfContainerProfileStorage)(unsafe.Pointer(&in.Storages)) out.PreRunTimeoutSeconds = (*int64)(unsafe.Pointer(in.PreRunTimeoutSeconds)) @@ -1079,90 +1079,90 @@ func autoConvert_v1alpha3_NnfContainerProfileData_To_v1alpha2_NnfContainerProfil return nil } -// Convert_v1alpha3_NnfContainerProfileData_To_v1alpha2_NnfContainerProfileData is an autogenerated conversion function. -func Convert_v1alpha3_NnfContainerProfileData_To_v1alpha2_NnfContainerProfileData(in *v1alpha3.NnfContainerProfileData, out *NnfContainerProfileData, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfContainerProfileData_To_v1alpha2_NnfContainerProfileData(in, out, s) +// Convert_v1alpha4_NnfContainerProfileData_To_v1alpha2_NnfContainerProfileData is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfContainerProfileData_To_v1alpha2_NnfContainerProfileData(in *v1alpha4.NnfContainerProfileData, out *NnfContainerProfileData, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfContainerProfileData_To_v1alpha2_NnfContainerProfileData(in, out, s) } -func autoConvert_v1alpha2_NnfContainerProfileList_To_v1alpha3_NnfContainerProfileList(in *NnfContainerProfileList, out *v1alpha3.NnfContainerProfileList, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfContainerProfileList_To_v1alpha4_NnfContainerProfileList(in *NnfContainerProfileList, out *v1alpha4.NnfContainerProfileList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha3.NnfContainerProfile)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]v1alpha4.NnfContainerProfile)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha2_NnfContainerProfileList_To_v1alpha3_NnfContainerProfileList is an autogenerated conversion function. -func Convert_v1alpha2_NnfContainerProfileList_To_v1alpha3_NnfContainerProfileList(in *NnfContainerProfileList, out *v1alpha3.NnfContainerProfileList, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfContainerProfileList_To_v1alpha3_NnfContainerProfileList(in, out, s) +// Convert_v1alpha2_NnfContainerProfileList_To_v1alpha4_NnfContainerProfileList is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfContainerProfileList_To_v1alpha4_NnfContainerProfileList(in *NnfContainerProfileList, out *v1alpha4.NnfContainerProfileList, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfContainerProfileList_To_v1alpha4_NnfContainerProfileList(in, out, s) } -func autoConvert_v1alpha3_NnfContainerProfileList_To_v1alpha2_NnfContainerProfileList(in *v1alpha3.NnfContainerProfileList, out *NnfContainerProfileList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfContainerProfileList_To_v1alpha2_NnfContainerProfileList(in *v1alpha4.NnfContainerProfileList, out *NnfContainerProfileList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]NnfContainerProfile)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha3_NnfContainerProfileList_To_v1alpha2_NnfContainerProfileList is an autogenerated conversion function. -func Convert_v1alpha3_NnfContainerProfileList_To_v1alpha2_NnfContainerProfileList(in *v1alpha3.NnfContainerProfileList, out *NnfContainerProfileList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfContainerProfileList_To_v1alpha2_NnfContainerProfileList(in, out, s) +// Convert_v1alpha4_NnfContainerProfileList_To_v1alpha2_NnfContainerProfileList is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfContainerProfileList_To_v1alpha2_NnfContainerProfileList(in *v1alpha4.NnfContainerProfileList, out *NnfContainerProfileList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfContainerProfileList_To_v1alpha2_NnfContainerProfileList(in, out, s) } -func autoConvert_v1alpha2_NnfContainerProfileStorage_To_v1alpha3_NnfContainerProfileStorage(in *NnfContainerProfileStorage, out *v1alpha3.NnfContainerProfileStorage, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfContainerProfileStorage_To_v1alpha4_NnfContainerProfileStorage(in *NnfContainerProfileStorage, out *v1alpha4.NnfContainerProfileStorage, s conversion.Scope) error { out.Name = in.Name out.Optional = in.Optional out.PVCMode = v1.PersistentVolumeAccessMode(in.PVCMode) return nil } -// Convert_v1alpha2_NnfContainerProfileStorage_To_v1alpha3_NnfContainerProfileStorage is an autogenerated conversion function. -func Convert_v1alpha2_NnfContainerProfileStorage_To_v1alpha3_NnfContainerProfileStorage(in *NnfContainerProfileStorage, out *v1alpha3.NnfContainerProfileStorage, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfContainerProfileStorage_To_v1alpha3_NnfContainerProfileStorage(in, out, s) +// Convert_v1alpha2_NnfContainerProfileStorage_To_v1alpha4_NnfContainerProfileStorage is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfContainerProfileStorage_To_v1alpha4_NnfContainerProfileStorage(in *NnfContainerProfileStorage, out *v1alpha4.NnfContainerProfileStorage, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfContainerProfileStorage_To_v1alpha4_NnfContainerProfileStorage(in, out, s) } -func autoConvert_v1alpha3_NnfContainerProfileStorage_To_v1alpha2_NnfContainerProfileStorage(in *v1alpha3.NnfContainerProfileStorage, out *NnfContainerProfileStorage, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfContainerProfileStorage_To_v1alpha2_NnfContainerProfileStorage(in *v1alpha4.NnfContainerProfileStorage, out *NnfContainerProfileStorage, s conversion.Scope) error { out.Name = in.Name out.Optional = in.Optional out.PVCMode = v1.PersistentVolumeAccessMode(in.PVCMode) return nil } -// Convert_v1alpha3_NnfContainerProfileStorage_To_v1alpha2_NnfContainerProfileStorage is an autogenerated conversion function. -func Convert_v1alpha3_NnfContainerProfileStorage_To_v1alpha2_NnfContainerProfileStorage(in *v1alpha3.NnfContainerProfileStorage, out *NnfContainerProfileStorage, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfContainerProfileStorage_To_v1alpha2_NnfContainerProfileStorage(in, out, s) +// Convert_v1alpha4_NnfContainerProfileStorage_To_v1alpha2_NnfContainerProfileStorage is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfContainerProfileStorage_To_v1alpha2_NnfContainerProfileStorage(in *v1alpha4.NnfContainerProfileStorage, out *NnfContainerProfileStorage, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfContainerProfileStorage_To_v1alpha2_NnfContainerProfileStorage(in, out, s) } -func autoConvert_v1alpha2_NnfDataMovement_To_v1alpha3_NnfDataMovement(in *NnfDataMovement, out *v1alpha3.NnfDataMovement, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfDataMovement_To_v1alpha4_NnfDataMovement(in *NnfDataMovement, out *v1alpha4.NnfDataMovement, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha2_NnfDataMovementSpec_To_v1alpha3_NnfDataMovementSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha2_NnfDataMovementSpec_To_v1alpha4_NnfDataMovementSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha2_NnfDataMovementStatus_To_v1alpha3_NnfDataMovementStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha2_NnfDataMovementStatus_To_v1alpha4_NnfDataMovementStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha2_NnfDataMovement_To_v1alpha3_NnfDataMovement is an autogenerated conversion function. -func Convert_v1alpha2_NnfDataMovement_To_v1alpha3_NnfDataMovement(in *NnfDataMovement, out *v1alpha3.NnfDataMovement, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfDataMovement_To_v1alpha3_NnfDataMovement(in, out, s) +// Convert_v1alpha2_NnfDataMovement_To_v1alpha4_NnfDataMovement is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfDataMovement_To_v1alpha4_NnfDataMovement(in *NnfDataMovement, out *v1alpha4.NnfDataMovement, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovement_To_v1alpha4_NnfDataMovement(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovement_To_v1alpha2_NnfDataMovement(in *v1alpha3.NnfDataMovement, out *NnfDataMovement, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovement_To_v1alpha2_NnfDataMovement(in *v1alpha4.NnfDataMovement, out *NnfDataMovement, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfDataMovementSpec_To_v1alpha2_NnfDataMovementSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha4_NnfDataMovementSpec_To_v1alpha2_NnfDataMovementSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha3_NnfDataMovementStatus_To_v1alpha2_NnfDataMovementStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha4_NnfDataMovementStatus_To_v1alpha2_NnfDataMovementStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfDataMovement_To_v1alpha2_NnfDataMovement is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovement_To_v1alpha2_NnfDataMovement(in *v1alpha3.NnfDataMovement, out *NnfDataMovement, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovement_To_v1alpha2_NnfDataMovement(in, out, s) +// Convert_v1alpha4_NnfDataMovement_To_v1alpha2_NnfDataMovement is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovement_To_v1alpha2_NnfDataMovement(in *v1alpha4.NnfDataMovement, out *NnfDataMovement, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovement_To_v1alpha2_NnfDataMovement(in, out, s) } -func autoConvert_v1alpha2_NnfDataMovementCommandStatus_To_v1alpha3_NnfDataMovementCommandStatus(in *NnfDataMovementCommandStatus, out *v1alpha3.NnfDataMovementCommandStatus, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfDataMovementCommandStatus_To_v1alpha4_NnfDataMovementCommandStatus(in *NnfDataMovementCommandStatus, out *v1alpha4.NnfDataMovementCommandStatus, s conversion.Scope) error { out.Command = in.Command out.ElapsedTime = in.ElapsedTime out.ProgressPercentage = (*int32)(unsafe.Pointer(in.ProgressPercentage)) @@ -1178,12 +1178,12 @@ func autoConvert_v1alpha2_NnfDataMovementCommandStatus_To_v1alpha3_NnfDataMoveme return nil } -// Convert_v1alpha2_NnfDataMovementCommandStatus_To_v1alpha3_NnfDataMovementCommandStatus is an autogenerated conversion function. -func Convert_v1alpha2_NnfDataMovementCommandStatus_To_v1alpha3_NnfDataMovementCommandStatus(in *NnfDataMovementCommandStatus, out *v1alpha3.NnfDataMovementCommandStatus, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfDataMovementCommandStatus_To_v1alpha3_NnfDataMovementCommandStatus(in, out, s) +// Convert_v1alpha2_NnfDataMovementCommandStatus_To_v1alpha4_NnfDataMovementCommandStatus is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfDataMovementCommandStatus_To_v1alpha4_NnfDataMovementCommandStatus(in *NnfDataMovementCommandStatus, out *v1alpha4.NnfDataMovementCommandStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovementCommandStatus_To_v1alpha4_NnfDataMovementCommandStatus(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovementCommandStatus_To_v1alpha2_NnfDataMovementCommandStatus(in *v1alpha3.NnfDataMovementCommandStatus, out *NnfDataMovementCommandStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovementCommandStatus_To_v1alpha2_NnfDataMovementCommandStatus(in *v1alpha4.NnfDataMovementCommandStatus, out *NnfDataMovementCommandStatus, s conversion.Scope) error { out.Command = in.Command out.ElapsedTime = in.ElapsedTime out.ProgressPercentage = (*int32)(unsafe.Pointer(in.ProgressPercentage)) @@ -1199,12 +1199,12 @@ func autoConvert_v1alpha3_NnfDataMovementCommandStatus_To_v1alpha2_NnfDataMoveme return nil } -// Convert_v1alpha3_NnfDataMovementCommandStatus_To_v1alpha2_NnfDataMovementCommandStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovementCommandStatus_To_v1alpha2_NnfDataMovementCommandStatus(in *v1alpha3.NnfDataMovementCommandStatus, out *NnfDataMovementCommandStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovementCommandStatus_To_v1alpha2_NnfDataMovementCommandStatus(in, out, s) +// Convert_v1alpha4_NnfDataMovementCommandStatus_To_v1alpha2_NnfDataMovementCommandStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementCommandStatus_To_v1alpha2_NnfDataMovementCommandStatus(in *v1alpha4.NnfDataMovementCommandStatus, out *NnfDataMovementCommandStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementCommandStatus_To_v1alpha2_NnfDataMovementCommandStatus(in, out, s) } -func autoConvert_v1alpha2_NnfDataMovementConfig_To_v1alpha3_NnfDataMovementConfig(in *NnfDataMovementConfig, out *v1alpha3.NnfDataMovementConfig, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfDataMovementConfig_To_v1alpha4_NnfDataMovementConfig(in *NnfDataMovementConfig, out *v1alpha4.NnfDataMovementConfig, s conversion.Scope) error { out.Dryrun = in.Dryrun out.MpirunOptions = in.MpirunOptions out.DcpOptions = in.DcpOptions @@ -1215,12 +1215,12 @@ func autoConvert_v1alpha2_NnfDataMovementConfig_To_v1alpha3_NnfDataMovementConfi return nil } -// Convert_v1alpha2_NnfDataMovementConfig_To_v1alpha3_NnfDataMovementConfig is an autogenerated conversion function. -func Convert_v1alpha2_NnfDataMovementConfig_To_v1alpha3_NnfDataMovementConfig(in *NnfDataMovementConfig, out *v1alpha3.NnfDataMovementConfig, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfDataMovementConfig_To_v1alpha3_NnfDataMovementConfig(in, out, s) +// Convert_v1alpha2_NnfDataMovementConfig_To_v1alpha4_NnfDataMovementConfig is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfDataMovementConfig_To_v1alpha4_NnfDataMovementConfig(in *NnfDataMovementConfig, out *v1alpha4.NnfDataMovementConfig, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovementConfig_To_v1alpha4_NnfDataMovementConfig(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovementConfig_To_v1alpha2_NnfDataMovementConfig(in *v1alpha3.NnfDataMovementConfig, out *NnfDataMovementConfig, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovementConfig_To_v1alpha2_NnfDataMovementConfig(in *v1alpha4.NnfDataMovementConfig, out *NnfDataMovementConfig, s conversion.Scope) error { out.Dryrun = in.Dryrun out.MpirunOptions = in.MpirunOptions out.DcpOptions = in.DcpOptions @@ -1231,88 +1231,88 @@ func autoConvert_v1alpha3_NnfDataMovementConfig_To_v1alpha2_NnfDataMovementConfi return nil } -// Convert_v1alpha3_NnfDataMovementConfig_To_v1alpha2_NnfDataMovementConfig is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovementConfig_To_v1alpha2_NnfDataMovementConfig(in *v1alpha3.NnfDataMovementConfig, out *NnfDataMovementConfig, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovementConfig_To_v1alpha2_NnfDataMovementConfig(in, out, s) +// Convert_v1alpha4_NnfDataMovementConfig_To_v1alpha2_NnfDataMovementConfig is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementConfig_To_v1alpha2_NnfDataMovementConfig(in *v1alpha4.NnfDataMovementConfig, out *NnfDataMovementConfig, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementConfig_To_v1alpha2_NnfDataMovementConfig(in, out, s) } -func autoConvert_v1alpha2_NnfDataMovementList_To_v1alpha3_NnfDataMovementList(in *NnfDataMovementList, out *v1alpha3.NnfDataMovementList, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfDataMovementList_To_v1alpha4_NnfDataMovementList(in *NnfDataMovementList, out *v1alpha4.NnfDataMovementList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha3.NnfDataMovement)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]v1alpha4.NnfDataMovement)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha2_NnfDataMovementList_To_v1alpha3_NnfDataMovementList is an autogenerated conversion function. -func Convert_v1alpha2_NnfDataMovementList_To_v1alpha3_NnfDataMovementList(in *NnfDataMovementList, out *v1alpha3.NnfDataMovementList, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfDataMovementList_To_v1alpha3_NnfDataMovementList(in, out, s) +// Convert_v1alpha2_NnfDataMovementList_To_v1alpha4_NnfDataMovementList is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfDataMovementList_To_v1alpha4_NnfDataMovementList(in *NnfDataMovementList, out *v1alpha4.NnfDataMovementList, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovementList_To_v1alpha4_NnfDataMovementList(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovementList_To_v1alpha2_NnfDataMovementList(in *v1alpha3.NnfDataMovementList, out *NnfDataMovementList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovementList_To_v1alpha2_NnfDataMovementList(in *v1alpha4.NnfDataMovementList, out *NnfDataMovementList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]NnfDataMovement)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha3_NnfDataMovementList_To_v1alpha2_NnfDataMovementList is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovementList_To_v1alpha2_NnfDataMovementList(in *v1alpha3.NnfDataMovementList, out *NnfDataMovementList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovementList_To_v1alpha2_NnfDataMovementList(in, out, s) +// Convert_v1alpha4_NnfDataMovementList_To_v1alpha2_NnfDataMovementList is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementList_To_v1alpha2_NnfDataMovementList(in *v1alpha4.NnfDataMovementList, out *NnfDataMovementList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementList_To_v1alpha2_NnfDataMovementList(in, out, s) } -func autoConvert_v1alpha2_NnfDataMovementManager_To_v1alpha3_NnfDataMovementManager(in *NnfDataMovementManager, out *v1alpha3.NnfDataMovementManager, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfDataMovementManager_To_v1alpha4_NnfDataMovementManager(in *NnfDataMovementManager, out *v1alpha4.NnfDataMovementManager, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha2_NnfDataMovementManagerSpec_To_v1alpha3_NnfDataMovementManagerSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha2_NnfDataMovementManagerSpec_To_v1alpha4_NnfDataMovementManagerSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha2_NnfDataMovementManagerStatus_To_v1alpha3_NnfDataMovementManagerStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha2_NnfDataMovementManagerStatus_To_v1alpha4_NnfDataMovementManagerStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha2_NnfDataMovementManager_To_v1alpha3_NnfDataMovementManager is an autogenerated conversion function. -func Convert_v1alpha2_NnfDataMovementManager_To_v1alpha3_NnfDataMovementManager(in *NnfDataMovementManager, out *v1alpha3.NnfDataMovementManager, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfDataMovementManager_To_v1alpha3_NnfDataMovementManager(in, out, s) +// Convert_v1alpha2_NnfDataMovementManager_To_v1alpha4_NnfDataMovementManager is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfDataMovementManager_To_v1alpha4_NnfDataMovementManager(in *NnfDataMovementManager, out *v1alpha4.NnfDataMovementManager, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovementManager_To_v1alpha4_NnfDataMovementManager(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovementManager_To_v1alpha2_NnfDataMovementManager(in *v1alpha3.NnfDataMovementManager, out *NnfDataMovementManager, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovementManager_To_v1alpha2_NnfDataMovementManager(in *v1alpha4.NnfDataMovementManager, out *NnfDataMovementManager, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfDataMovementManagerSpec_To_v1alpha2_NnfDataMovementManagerSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha4_NnfDataMovementManagerSpec_To_v1alpha2_NnfDataMovementManagerSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha3_NnfDataMovementManagerStatus_To_v1alpha2_NnfDataMovementManagerStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha4_NnfDataMovementManagerStatus_To_v1alpha2_NnfDataMovementManagerStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfDataMovementManager_To_v1alpha2_NnfDataMovementManager is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovementManager_To_v1alpha2_NnfDataMovementManager(in *v1alpha3.NnfDataMovementManager, out *NnfDataMovementManager, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovementManager_To_v1alpha2_NnfDataMovementManager(in, out, s) +// Convert_v1alpha4_NnfDataMovementManager_To_v1alpha2_NnfDataMovementManager is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementManager_To_v1alpha2_NnfDataMovementManager(in *v1alpha4.NnfDataMovementManager, out *NnfDataMovementManager, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementManager_To_v1alpha2_NnfDataMovementManager(in, out, s) } -func autoConvert_v1alpha2_NnfDataMovementManagerList_To_v1alpha3_NnfDataMovementManagerList(in *NnfDataMovementManagerList, out *v1alpha3.NnfDataMovementManagerList, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfDataMovementManagerList_To_v1alpha4_NnfDataMovementManagerList(in *NnfDataMovementManagerList, out *v1alpha4.NnfDataMovementManagerList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha3.NnfDataMovementManager)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]v1alpha4.NnfDataMovementManager)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha2_NnfDataMovementManagerList_To_v1alpha3_NnfDataMovementManagerList is an autogenerated conversion function. -func Convert_v1alpha2_NnfDataMovementManagerList_To_v1alpha3_NnfDataMovementManagerList(in *NnfDataMovementManagerList, out *v1alpha3.NnfDataMovementManagerList, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfDataMovementManagerList_To_v1alpha3_NnfDataMovementManagerList(in, out, s) +// Convert_v1alpha2_NnfDataMovementManagerList_To_v1alpha4_NnfDataMovementManagerList is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfDataMovementManagerList_To_v1alpha4_NnfDataMovementManagerList(in *NnfDataMovementManagerList, out *v1alpha4.NnfDataMovementManagerList, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovementManagerList_To_v1alpha4_NnfDataMovementManagerList(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovementManagerList_To_v1alpha2_NnfDataMovementManagerList(in *v1alpha3.NnfDataMovementManagerList, out *NnfDataMovementManagerList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovementManagerList_To_v1alpha2_NnfDataMovementManagerList(in *v1alpha4.NnfDataMovementManagerList, out *NnfDataMovementManagerList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]NnfDataMovementManager)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha3_NnfDataMovementManagerList_To_v1alpha2_NnfDataMovementManagerList is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovementManagerList_To_v1alpha2_NnfDataMovementManagerList(in *v1alpha3.NnfDataMovementManagerList, out *NnfDataMovementManagerList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovementManagerList_To_v1alpha2_NnfDataMovementManagerList(in, out, s) +// Convert_v1alpha4_NnfDataMovementManagerList_To_v1alpha2_NnfDataMovementManagerList is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementManagerList_To_v1alpha2_NnfDataMovementManagerList(in *v1alpha4.NnfDataMovementManagerList, out *NnfDataMovementManagerList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementManagerList_To_v1alpha2_NnfDataMovementManagerList(in, out, s) } -func autoConvert_v1alpha2_NnfDataMovementManagerSpec_To_v1alpha3_NnfDataMovementManagerSpec(in *NnfDataMovementManagerSpec, out *v1alpha3.NnfDataMovementManagerSpec, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfDataMovementManagerSpec_To_v1alpha4_NnfDataMovementManagerSpec(in *NnfDataMovementManagerSpec, out *v1alpha4.NnfDataMovementManagerSpec, s conversion.Scope) error { out.Selector = in.Selector out.Template = in.Template out.UpdateStrategy = in.UpdateStrategy @@ -1321,12 +1321,12 @@ func autoConvert_v1alpha2_NnfDataMovementManagerSpec_To_v1alpha3_NnfDataMovement return nil } -// Convert_v1alpha2_NnfDataMovementManagerSpec_To_v1alpha3_NnfDataMovementManagerSpec is an autogenerated conversion function. -func Convert_v1alpha2_NnfDataMovementManagerSpec_To_v1alpha3_NnfDataMovementManagerSpec(in *NnfDataMovementManagerSpec, out *v1alpha3.NnfDataMovementManagerSpec, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfDataMovementManagerSpec_To_v1alpha3_NnfDataMovementManagerSpec(in, out, s) +// Convert_v1alpha2_NnfDataMovementManagerSpec_To_v1alpha4_NnfDataMovementManagerSpec is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfDataMovementManagerSpec_To_v1alpha4_NnfDataMovementManagerSpec(in *NnfDataMovementManagerSpec, out *v1alpha4.NnfDataMovementManagerSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovementManagerSpec_To_v1alpha4_NnfDataMovementManagerSpec(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovementManagerSpec_To_v1alpha2_NnfDataMovementManagerSpec(in *v1alpha3.NnfDataMovementManagerSpec, out *NnfDataMovementManagerSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovementManagerSpec_To_v1alpha2_NnfDataMovementManagerSpec(in *v1alpha4.NnfDataMovementManagerSpec, out *NnfDataMovementManagerSpec, s conversion.Scope) error { out.Selector = in.Selector out.Template = in.Template out.UpdateStrategy = in.UpdateStrategy @@ -1335,58 +1335,58 @@ func autoConvert_v1alpha3_NnfDataMovementManagerSpec_To_v1alpha2_NnfDataMovement return nil } -// Convert_v1alpha3_NnfDataMovementManagerSpec_To_v1alpha2_NnfDataMovementManagerSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovementManagerSpec_To_v1alpha2_NnfDataMovementManagerSpec(in *v1alpha3.NnfDataMovementManagerSpec, out *NnfDataMovementManagerSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovementManagerSpec_To_v1alpha2_NnfDataMovementManagerSpec(in, out, s) +// Convert_v1alpha4_NnfDataMovementManagerSpec_To_v1alpha2_NnfDataMovementManagerSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementManagerSpec_To_v1alpha2_NnfDataMovementManagerSpec(in *v1alpha4.NnfDataMovementManagerSpec, out *NnfDataMovementManagerSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementManagerSpec_To_v1alpha2_NnfDataMovementManagerSpec(in, out, s) } -func autoConvert_v1alpha2_NnfDataMovementManagerStatus_To_v1alpha3_NnfDataMovementManagerStatus(in *NnfDataMovementManagerStatus, out *v1alpha3.NnfDataMovementManagerStatus, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfDataMovementManagerStatus_To_v1alpha4_NnfDataMovementManagerStatus(in *NnfDataMovementManagerStatus, out *v1alpha4.NnfDataMovementManagerStatus, s conversion.Scope) error { out.Ready = in.Ready return nil } -// Convert_v1alpha2_NnfDataMovementManagerStatus_To_v1alpha3_NnfDataMovementManagerStatus is an autogenerated conversion function. -func Convert_v1alpha2_NnfDataMovementManagerStatus_To_v1alpha3_NnfDataMovementManagerStatus(in *NnfDataMovementManagerStatus, out *v1alpha3.NnfDataMovementManagerStatus, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfDataMovementManagerStatus_To_v1alpha3_NnfDataMovementManagerStatus(in, out, s) +// Convert_v1alpha2_NnfDataMovementManagerStatus_To_v1alpha4_NnfDataMovementManagerStatus is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfDataMovementManagerStatus_To_v1alpha4_NnfDataMovementManagerStatus(in *NnfDataMovementManagerStatus, out *v1alpha4.NnfDataMovementManagerStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovementManagerStatus_To_v1alpha4_NnfDataMovementManagerStatus(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovementManagerStatus_To_v1alpha2_NnfDataMovementManagerStatus(in *v1alpha3.NnfDataMovementManagerStatus, out *NnfDataMovementManagerStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovementManagerStatus_To_v1alpha2_NnfDataMovementManagerStatus(in *v1alpha4.NnfDataMovementManagerStatus, out *NnfDataMovementManagerStatus, s conversion.Scope) error { out.Ready = in.Ready return nil } -// Convert_v1alpha3_NnfDataMovementManagerStatus_To_v1alpha2_NnfDataMovementManagerStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovementManagerStatus_To_v1alpha2_NnfDataMovementManagerStatus(in *v1alpha3.NnfDataMovementManagerStatus, out *NnfDataMovementManagerStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovementManagerStatus_To_v1alpha2_NnfDataMovementManagerStatus(in, out, s) +// Convert_v1alpha4_NnfDataMovementManagerStatus_To_v1alpha2_NnfDataMovementManagerStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementManagerStatus_To_v1alpha2_NnfDataMovementManagerStatus(in *v1alpha4.NnfDataMovementManagerStatus, out *NnfDataMovementManagerStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementManagerStatus_To_v1alpha2_NnfDataMovementManagerStatus(in, out, s) } -func autoConvert_v1alpha2_NnfDataMovementProfile_To_v1alpha3_NnfDataMovementProfile(in *NnfDataMovementProfile, out *v1alpha3.NnfDataMovementProfile, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfDataMovementProfile_To_v1alpha4_NnfDataMovementProfile(in *NnfDataMovementProfile, out *v1alpha4.NnfDataMovementProfile, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha2_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData(&in.Data, &out.Data, s); err != nil { + if err := Convert_v1alpha2_NnfDataMovementProfileData_To_v1alpha4_NnfDataMovementProfileData(&in.Data, &out.Data, s); err != nil { return err } return nil } -// Convert_v1alpha2_NnfDataMovementProfile_To_v1alpha3_NnfDataMovementProfile is an autogenerated conversion function. -func Convert_v1alpha2_NnfDataMovementProfile_To_v1alpha3_NnfDataMovementProfile(in *NnfDataMovementProfile, out *v1alpha3.NnfDataMovementProfile, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfDataMovementProfile_To_v1alpha3_NnfDataMovementProfile(in, out, s) +// Convert_v1alpha2_NnfDataMovementProfile_To_v1alpha4_NnfDataMovementProfile is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfDataMovementProfile_To_v1alpha4_NnfDataMovementProfile(in *NnfDataMovementProfile, out *v1alpha4.NnfDataMovementProfile, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovementProfile_To_v1alpha4_NnfDataMovementProfile(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovementProfile_To_v1alpha2_NnfDataMovementProfile(in *v1alpha3.NnfDataMovementProfile, out *NnfDataMovementProfile, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovementProfile_To_v1alpha2_NnfDataMovementProfile(in *v1alpha4.NnfDataMovementProfile, out *NnfDataMovementProfile, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData(&in.Data, &out.Data, s); err != nil { + if err := Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData(&in.Data, &out.Data, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfDataMovementProfile_To_v1alpha2_NnfDataMovementProfile is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovementProfile_To_v1alpha2_NnfDataMovementProfile(in *v1alpha3.NnfDataMovementProfile, out *NnfDataMovementProfile, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovementProfile_To_v1alpha2_NnfDataMovementProfile(in, out, s) +// Convert_v1alpha4_NnfDataMovementProfile_To_v1alpha2_NnfDataMovementProfile is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementProfile_To_v1alpha2_NnfDataMovementProfile(in *v1alpha4.NnfDataMovementProfile, out *NnfDataMovementProfile, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementProfile_To_v1alpha2_NnfDataMovementProfile(in, out, s) } -func autoConvert_v1alpha2_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData(in *NnfDataMovementProfileData, out *v1alpha3.NnfDataMovementProfileData, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfDataMovementProfileData_To_v1alpha4_NnfDataMovementProfileData(in *NnfDataMovementProfileData, out *v1alpha4.NnfDataMovementProfileData, s conversion.Scope) error { out.Default = in.Default out.Pinned = in.Pinned out.Slots = in.Slots @@ -1400,12 +1400,12 @@ func autoConvert_v1alpha2_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovement return nil } -// Convert_v1alpha2_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData is an autogenerated conversion function. -func Convert_v1alpha2_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData(in *NnfDataMovementProfileData, out *v1alpha3.NnfDataMovementProfileData, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData(in, out, s) +// Convert_v1alpha2_NnfDataMovementProfileData_To_v1alpha4_NnfDataMovementProfileData is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfDataMovementProfileData_To_v1alpha4_NnfDataMovementProfileData(in *NnfDataMovementProfileData, out *v1alpha4.NnfDataMovementProfileData, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovementProfileData_To_v1alpha4_NnfDataMovementProfileData(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData(in *v1alpha3.NnfDataMovementProfileData, out *NnfDataMovementProfileData, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData(in *v1alpha4.NnfDataMovementProfileData, out *NnfDataMovementProfileData, s conversion.Scope) error { out.Default = in.Default out.Pinned = in.Pinned out.Slots = in.Slots @@ -1419,50 +1419,50 @@ func autoConvert_v1alpha3_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovement return nil } -// Convert_v1alpha3_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData(in *v1alpha3.NnfDataMovementProfileData, out *NnfDataMovementProfileData, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData(in, out, s) +// Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData(in *v1alpha4.NnfDataMovementProfileData, out *NnfDataMovementProfileData, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData(in, out, s) } -func autoConvert_v1alpha2_NnfDataMovementProfileList_To_v1alpha3_NnfDataMovementProfileList(in *NnfDataMovementProfileList, out *v1alpha3.NnfDataMovementProfileList, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfDataMovementProfileList_To_v1alpha4_NnfDataMovementProfileList(in *NnfDataMovementProfileList, out *v1alpha4.NnfDataMovementProfileList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha3.NnfDataMovementProfile)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]v1alpha4.NnfDataMovementProfile)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha2_NnfDataMovementProfileList_To_v1alpha3_NnfDataMovementProfileList is an autogenerated conversion function. -func Convert_v1alpha2_NnfDataMovementProfileList_To_v1alpha3_NnfDataMovementProfileList(in *NnfDataMovementProfileList, out *v1alpha3.NnfDataMovementProfileList, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfDataMovementProfileList_To_v1alpha3_NnfDataMovementProfileList(in, out, s) +// Convert_v1alpha2_NnfDataMovementProfileList_To_v1alpha4_NnfDataMovementProfileList is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfDataMovementProfileList_To_v1alpha4_NnfDataMovementProfileList(in *NnfDataMovementProfileList, out *v1alpha4.NnfDataMovementProfileList, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovementProfileList_To_v1alpha4_NnfDataMovementProfileList(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovementProfileList_To_v1alpha2_NnfDataMovementProfileList(in *v1alpha3.NnfDataMovementProfileList, out *NnfDataMovementProfileList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovementProfileList_To_v1alpha2_NnfDataMovementProfileList(in *v1alpha4.NnfDataMovementProfileList, out *NnfDataMovementProfileList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]NnfDataMovementProfile)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha3_NnfDataMovementProfileList_To_v1alpha2_NnfDataMovementProfileList is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovementProfileList_To_v1alpha2_NnfDataMovementProfileList(in *v1alpha3.NnfDataMovementProfileList, out *NnfDataMovementProfileList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovementProfileList_To_v1alpha2_NnfDataMovementProfileList(in, out, s) +// Convert_v1alpha4_NnfDataMovementProfileList_To_v1alpha2_NnfDataMovementProfileList is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementProfileList_To_v1alpha2_NnfDataMovementProfileList(in *v1alpha4.NnfDataMovementProfileList, out *NnfDataMovementProfileList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementProfileList_To_v1alpha2_NnfDataMovementProfileList(in, out, s) } -func autoConvert_v1alpha2_NnfDataMovementSpec_To_v1alpha3_NnfDataMovementSpec(in *NnfDataMovementSpec, out *v1alpha3.NnfDataMovementSpec, s conversion.Scope) error { - out.Source = (*v1alpha3.NnfDataMovementSpecSourceDestination)(unsafe.Pointer(in.Source)) - out.Destination = (*v1alpha3.NnfDataMovementSpecSourceDestination)(unsafe.Pointer(in.Destination)) +func autoConvert_v1alpha2_NnfDataMovementSpec_To_v1alpha4_NnfDataMovementSpec(in *NnfDataMovementSpec, out *v1alpha4.NnfDataMovementSpec, s conversion.Scope) error { + out.Source = (*v1alpha4.NnfDataMovementSpecSourceDestination)(unsafe.Pointer(in.Source)) + out.Destination = (*v1alpha4.NnfDataMovementSpecSourceDestination)(unsafe.Pointer(in.Destination)) out.UserId = in.UserId out.GroupId = in.GroupId out.Cancel = in.Cancel out.ProfileReference = in.ProfileReference - out.UserConfig = (*v1alpha3.NnfDataMovementConfig)(unsafe.Pointer(in.UserConfig)) + out.UserConfig = (*v1alpha4.NnfDataMovementConfig)(unsafe.Pointer(in.UserConfig)) return nil } -// Convert_v1alpha2_NnfDataMovementSpec_To_v1alpha3_NnfDataMovementSpec is an autogenerated conversion function. -func Convert_v1alpha2_NnfDataMovementSpec_To_v1alpha3_NnfDataMovementSpec(in *NnfDataMovementSpec, out *v1alpha3.NnfDataMovementSpec, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfDataMovementSpec_To_v1alpha3_NnfDataMovementSpec(in, out, s) +// Convert_v1alpha2_NnfDataMovementSpec_To_v1alpha4_NnfDataMovementSpec is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfDataMovementSpec_To_v1alpha4_NnfDataMovementSpec(in *NnfDataMovementSpec, out *v1alpha4.NnfDataMovementSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovementSpec_To_v1alpha4_NnfDataMovementSpec(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovementSpec_To_v1alpha2_NnfDataMovementSpec(in *v1alpha3.NnfDataMovementSpec, out *NnfDataMovementSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovementSpec_To_v1alpha2_NnfDataMovementSpec(in *v1alpha4.NnfDataMovementSpec, out *NnfDataMovementSpec, s conversion.Scope) error { out.Source = (*NnfDataMovementSpecSourceDestination)(unsafe.Pointer(in.Source)) out.Destination = (*NnfDataMovementSpecSourceDestination)(unsafe.Pointer(in.Destination)) out.UserId = in.UserId @@ -1473,51 +1473,51 @@ func autoConvert_v1alpha3_NnfDataMovementSpec_To_v1alpha2_NnfDataMovementSpec(in return nil } -// Convert_v1alpha3_NnfDataMovementSpec_To_v1alpha2_NnfDataMovementSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovementSpec_To_v1alpha2_NnfDataMovementSpec(in *v1alpha3.NnfDataMovementSpec, out *NnfDataMovementSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovementSpec_To_v1alpha2_NnfDataMovementSpec(in, out, s) +// Convert_v1alpha4_NnfDataMovementSpec_To_v1alpha2_NnfDataMovementSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementSpec_To_v1alpha2_NnfDataMovementSpec(in *v1alpha4.NnfDataMovementSpec, out *NnfDataMovementSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementSpec_To_v1alpha2_NnfDataMovementSpec(in, out, s) } -func autoConvert_v1alpha2_NnfDataMovementSpecSourceDestination_To_v1alpha3_NnfDataMovementSpecSourceDestination(in *NnfDataMovementSpecSourceDestination, out *v1alpha3.NnfDataMovementSpecSourceDestination, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfDataMovementSpecSourceDestination_To_v1alpha4_NnfDataMovementSpecSourceDestination(in *NnfDataMovementSpecSourceDestination, out *v1alpha4.NnfDataMovementSpecSourceDestination, s conversion.Scope) error { out.Path = in.Path out.StorageReference = in.StorageReference return nil } -// Convert_v1alpha2_NnfDataMovementSpecSourceDestination_To_v1alpha3_NnfDataMovementSpecSourceDestination is an autogenerated conversion function. -func Convert_v1alpha2_NnfDataMovementSpecSourceDestination_To_v1alpha3_NnfDataMovementSpecSourceDestination(in *NnfDataMovementSpecSourceDestination, out *v1alpha3.NnfDataMovementSpecSourceDestination, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfDataMovementSpecSourceDestination_To_v1alpha3_NnfDataMovementSpecSourceDestination(in, out, s) +// Convert_v1alpha2_NnfDataMovementSpecSourceDestination_To_v1alpha4_NnfDataMovementSpecSourceDestination is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfDataMovementSpecSourceDestination_To_v1alpha4_NnfDataMovementSpecSourceDestination(in *NnfDataMovementSpecSourceDestination, out *v1alpha4.NnfDataMovementSpecSourceDestination, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovementSpecSourceDestination_To_v1alpha4_NnfDataMovementSpecSourceDestination(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovementSpecSourceDestination_To_v1alpha2_NnfDataMovementSpecSourceDestination(in *v1alpha3.NnfDataMovementSpecSourceDestination, out *NnfDataMovementSpecSourceDestination, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovementSpecSourceDestination_To_v1alpha2_NnfDataMovementSpecSourceDestination(in *v1alpha4.NnfDataMovementSpecSourceDestination, out *NnfDataMovementSpecSourceDestination, s conversion.Scope) error { out.Path = in.Path out.StorageReference = in.StorageReference return nil } -// Convert_v1alpha3_NnfDataMovementSpecSourceDestination_To_v1alpha2_NnfDataMovementSpecSourceDestination is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovementSpecSourceDestination_To_v1alpha2_NnfDataMovementSpecSourceDestination(in *v1alpha3.NnfDataMovementSpecSourceDestination, out *NnfDataMovementSpecSourceDestination, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovementSpecSourceDestination_To_v1alpha2_NnfDataMovementSpecSourceDestination(in, out, s) +// Convert_v1alpha4_NnfDataMovementSpecSourceDestination_To_v1alpha2_NnfDataMovementSpecSourceDestination is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementSpecSourceDestination_To_v1alpha2_NnfDataMovementSpecSourceDestination(in *v1alpha4.NnfDataMovementSpecSourceDestination, out *NnfDataMovementSpecSourceDestination, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementSpecSourceDestination_To_v1alpha2_NnfDataMovementSpecSourceDestination(in, out, s) } -func autoConvert_v1alpha2_NnfDataMovementStatus_To_v1alpha3_NnfDataMovementStatus(in *NnfDataMovementStatus, out *v1alpha3.NnfDataMovementStatus, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfDataMovementStatus_To_v1alpha4_NnfDataMovementStatus(in *NnfDataMovementStatus, out *v1alpha4.NnfDataMovementStatus, s conversion.Scope) error { out.State = in.State out.Status = in.Status out.Message = in.Message out.StartTime = (*metav1.MicroTime)(unsafe.Pointer(in.StartTime)) out.EndTime = (*metav1.MicroTime)(unsafe.Pointer(in.EndTime)) out.Restarts = in.Restarts - out.CommandStatus = (*v1alpha3.NnfDataMovementCommandStatus)(unsafe.Pointer(in.CommandStatus)) + out.CommandStatus = (*v1alpha4.NnfDataMovementCommandStatus)(unsafe.Pointer(in.CommandStatus)) out.ResourceError = in.ResourceError return nil } -// Convert_v1alpha2_NnfDataMovementStatus_To_v1alpha3_NnfDataMovementStatus is an autogenerated conversion function. -func Convert_v1alpha2_NnfDataMovementStatus_To_v1alpha3_NnfDataMovementStatus(in *NnfDataMovementStatus, out *v1alpha3.NnfDataMovementStatus, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfDataMovementStatus_To_v1alpha3_NnfDataMovementStatus(in, out, s) +// Convert_v1alpha2_NnfDataMovementStatus_To_v1alpha4_NnfDataMovementStatus is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfDataMovementStatus_To_v1alpha4_NnfDataMovementStatus(in *NnfDataMovementStatus, out *v1alpha4.NnfDataMovementStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDataMovementStatus_To_v1alpha4_NnfDataMovementStatus(in, out, s) } -func autoConvert_v1alpha3_NnfDataMovementStatus_To_v1alpha2_NnfDataMovementStatus(in *v1alpha3.NnfDataMovementStatus, out *NnfDataMovementStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDataMovementStatus_To_v1alpha2_NnfDataMovementStatus(in *v1alpha4.NnfDataMovementStatus, out *NnfDataMovementStatus, s conversion.Scope) error { out.State = in.State out.Status = in.Status out.Message = in.Message @@ -1529,102 +1529,102 @@ func autoConvert_v1alpha3_NnfDataMovementStatus_To_v1alpha2_NnfDataMovementStatu return nil } -// Convert_v1alpha3_NnfDataMovementStatus_To_v1alpha2_NnfDataMovementStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfDataMovementStatus_To_v1alpha2_NnfDataMovementStatus(in *v1alpha3.NnfDataMovementStatus, out *NnfDataMovementStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDataMovementStatus_To_v1alpha2_NnfDataMovementStatus(in, out, s) +// Convert_v1alpha4_NnfDataMovementStatus_To_v1alpha2_NnfDataMovementStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementStatus_To_v1alpha2_NnfDataMovementStatus(in *v1alpha4.NnfDataMovementStatus, out *NnfDataMovementStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementStatus_To_v1alpha2_NnfDataMovementStatus(in, out, s) } -func autoConvert_v1alpha2_NnfDriveStatus_To_v1alpha3_NnfDriveStatus(in *NnfDriveStatus, out *v1alpha3.NnfDriveStatus, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfDriveStatus_To_v1alpha4_NnfDriveStatus(in *NnfDriveStatus, out *v1alpha4.NnfDriveStatus, s conversion.Scope) error { out.Model = in.Model out.SerialNumber = in.SerialNumber out.FirmwareVersion = in.FirmwareVersion out.Slot = in.Slot out.Capacity = in.Capacity out.WearLevel = in.WearLevel - if err := Convert_v1alpha2_NnfResourceStatus_To_v1alpha3_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { + if err := Convert_v1alpha2_NnfResourceStatus_To_v1alpha4_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { return err } return nil } -// Convert_v1alpha2_NnfDriveStatus_To_v1alpha3_NnfDriveStatus is an autogenerated conversion function. -func Convert_v1alpha2_NnfDriveStatus_To_v1alpha3_NnfDriveStatus(in *NnfDriveStatus, out *v1alpha3.NnfDriveStatus, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfDriveStatus_To_v1alpha3_NnfDriveStatus(in, out, s) +// Convert_v1alpha2_NnfDriveStatus_To_v1alpha4_NnfDriveStatus is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfDriveStatus_To_v1alpha4_NnfDriveStatus(in *NnfDriveStatus, out *v1alpha4.NnfDriveStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfDriveStatus_To_v1alpha4_NnfDriveStatus(in, out, s) } -func autoConvert_v1alpha3_NnfDriveStatus_To_v1alpha2_NnfDriveStatus(in *v1alpha3.NnfDriveStatus, out *NnfDriveStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfDriveStatus_To_v1alpha2_NnfDriveStatus(in *v1alpha4.NnfDriveStatus, out *NnfDriveStatus, s conversion.Scope) error { out.Model = in.Model out.SerialNumber = in.SerialNumber out.FirmwareVersion = in.FirmwareVersion out.Slot = in.Slot out.Capacity = in.Capacity out.WearLevel = in.WearLevel - if err := Convert_v1alpha3_NnfResourceStatus_To_v1alpha2_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { + if err := Convert_v1alpha4_NnfResourceStatus_To_v1alpha2_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfDriveStatus_To_v1alpha2_NnfDriveStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfDriveStatus_To_v1alpha2_NnfDriveStatus(in *v1alpha3.NnfDriveStatus, out *NnfDriveStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfDriveStatus_To_v1alpha2_NnfDriveStatus(in, out, s) +// Convert_v1alpha4_NnfDriveStatus_To_v1alpha2_NnfDriveStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDriveStatus_To_v1alpha2_NnfDriveStatus(in *v1alpha4.NnfDriveStatus, out *NnfDriveStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDriveStatus_To_v1alpha2_NnfDriveStatus(in, out, s) } -func autoConvert_v1alpha2_NnfLustreMGT_To_v1alpha3_NnfLustreMGT(in *NnfLustreMGT, out *v1alpha3.NnfLustreMGT, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfLustreMGT_To_v1alpha4_NnfLustreMGT(in *NnfLustreMGT, out *v1alpha4.NnfLustreMGT, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha2_NnfLustreMGTSpec_To_v1alpha3_NnfLustreMGTSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha2_NnfLustreMGTSpec_To_v1alpha4_NnfLustreMGTSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha2_NnfLustreMGTStatus_To_v1alpha3_NnfLustreMGTStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha2_NnfLustreMGTStatus_To_v1alpha4_NnfLustreMGTStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha2_NnfLustreMGT_To_v1alpha3_NnfLustreMGT is an autogenerated conversion function. -func Convert_v1alpha2_NnfLustreMGT_To_v1alpha3_NnfLustreMGT(in *NnfLustreMGT, out *v1alpha3.NnfLustreMGT, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfLustreMGT_To_v1alpha3_NnfLustreMGT(in, out, s) +// Convert_v1alpha2_NnfLustreMGT_To_v1alpha4_NnfLustreMGT is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfLustreMGT_To_v1alpha4_NnfLustreMGT(in *NnfLustreMGT, out *v1alpha4.NnfLustreMGT, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfLustreMGT_To_v1alpha4_NnfLustreMGT(in, out, s) } -func autoConvert_v1alpha3_NnfLustreMGT_To_v1alpha2_NnfLustreMGT(in *v1alpha3.NnfLustreMGT, out *NnfLustreMGT, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfLustreMGT_To_v1alpha2_NnfLustreMGT(in *v1alpha4.NnfLustreMGT, out *NnfLustreMGT, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfLustreMGTSpec_To_v1alpha2_NnfLustreMGTSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha4_NnfLustreMGTSpec_To_v1alpha2_NnfLustreMGTSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha3_NnfLustreMGTStatus_To_v1alpha2_NnfLustreMGTStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha4_NnfLustreMGTStatus_To_v1alpha2_NnfLustreMGTStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfLustreMGT_To_v1alpha2_NnfLustreMGT is an autogenerated conversion function. -func Convert_v1alpha3_NnfLustreMGT_To_v1alpha2_NnfLustreMGT(in *v1alpha3.NnfLustreMGT, out *NnfLustreMGT, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfLustreMGT_To_v1alpha2_NnfLustreMGT(in, out, s) +// Convert_v1alpha4_NnfLustreMGT_To_v1alpha2_NnfLustreMGT is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfLustreMGT_To_v1alpha2_NnfLustreMGT(in *v1alpha4.NnfLustreMGT, out *NnfLustreMGT, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfLustreMGT_To_v1alpha2_NnfLustreMGT(in, out, s) } -func autoConvert_v1alpha2_NnfLustreMGTList_To_v1alpha3_NnfLustreMGTList(in *NnfLustreMGTList, out *v1alpha3.NnfLustreMGTList, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfLustreMGTList_To_v1alpha4_NnfLustreMGTList(in *NnfLustreMGTList, out *v1alpha4.NnfLustreMGTList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha3.NnfLustreMGT)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]v1alpha4.NnfLustreMGT)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha2_NnfLustreMGTList_To_v1alpha3_NnfLustreMGTList is an autogenerated conversion function. -func Convert_v1alpha2_NnfLustreMGTList_To_v1alpha3_NnfLustreMGTList(in *NnfLustreMGTList, out *v1alpha3.NnfLustreMGTList, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfLustreMGTList_To_v1alpha3_NnfLustreMGTList(in, out, s) +// Convert_v1alpha2_NnfLustreMGTList_To_v1alpha4_NnfLustreMGTList is an autogenerated conversion function. +func Convert_v1alpha2_NnfLustreMGTList_To_v1alpha4_NnfLustreMGTList(in *NnfLustreMGTList, out *v1alpha4.NnfLustreMGTList, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfLustreMGTList_To_v1alpha4_NnfLustreMGTList(in, out, s) } -func autoConvert_v1alpha3_NnfLustreMGTList_To_v1alpha2_NnfLustreMGTList(in *v1alpha3.NnfLustreMGTList, out *NnfLustreMGTList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfLustreMGTList_To_v1alpha2_NnfLustreMGTList(in *v1alpha4.NnfLustreMGTList, out *NnfLustreMGTList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]NnfLustreMGT)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha3_NnfLustreMGTList_To_v1alpha2_NnfLustreMGTList is an autogenerated conversion function. 
-func Convert_v1alpha3_NnfLustreMGTList_To_v1alpha2_NnfLustreMGTList(in *v1alpha3.NnfLustreMGTList, out *NnfLustreMGTList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfLustreMGTList_To_v1alpha2_NnfLustreMGTList(in, out, s) +// Convert_v1alpha4_NnfLustreMGTList_To_v1alpha2_NnfLustreMGTList is an autogenerated conversion function. +func Convert_v1alpha4_NnfLustreMGTList_To_v1alpha2_NnfLustreMGTList(in *v1alpha4.NnfLustreMGTList, out *NnfLustreMGTList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfLustreMGTList_To_v1alpha2_NnfLustreMGTList(in, out, s) } -func autoConvert_v1alpha2_NnfLustreMGTSpec_To_v1alpha3_NnfLustreMGTSpec(in *NnfLustreMGTSpec, out *v1alpha3.NnfLustreMGTSpec, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfLustreMGTSpec_To_v1alpha4_NnfLustreMGTSpec(in *NnfLustreMGTSpec, out *v1alpha4.NnfLustreMGTSpec, s conversion.Scope) error { out.Addresses = *(*[]string)(unsafe.Pointer(&in.Addresses)) out.FsNameBlackList = *(*[]string)(unsafe.Pointer(&in.FsNameBlackList)) out.FsNameStart = in.FsNameStart @@ -1633,12 +1633,12 @@ func autoConvert_v1alpha2_NnfLustreMGTSpec_To_v1alpha3_NnfLustreMGTSpec(in *NnfL return nil } -// Convert_v1alpha2_NnfLustreMGTSpec_To_v1alpha3_NnfLustreMGTSpec is an autogenerated conversion function. -func Convert_v1alpha2_NnfLustreMGTSpec_To_v1alpha3_NnfLustreMGTSpec(in *NnfLustreMGTSpec, out *v1alpha3.NnfLustreMGTSpec, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfLustreMGTSpec_To_v1alpha3_NnfLustreMGTSpec(in, out, s) +// Convert_v1alpha2_NnfLustreMGTSpec_To_v1alpha4_NnfLustreMGTSpec is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfLustreMGTSpec_To_v1alpha4_NnfLustreMGTSpec(in *NnfLustreMGTSpec, out *v1alpha4.NnfLustreMGTSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfLustreMGTSpec_To_v1alpha4_NnfLustreMGTSpec(in, out, s) } -func autoConvert_v1alpha3_NnfLustreMGTSpec_To_v1alpha2_NnfLustreMGTSpec(in *v1alpha3.NnfLustreMGTSpec, out *NnfLustreMGTSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfLustreMGTSpec_To_v1alpha2_NnfLustreMGTSpec(in *v1alpha4.NnfLustreMGTSpec, out *NnfLustreMGTSpec, s conversion.Scope) error { out.Addresses = *(*[]string)(unsafe.Pointer(&in.Addresses)) out.FsNameBlackList = *(*[]string)(unsafe.Pointer(&in.FsNameBlackList)) out.FsNameStart = in.FsNameStart @@ -1647,179 +1647,179 @@ func autoConvert_v1alpha3_NnfLustreMGTSpec_To_v1alpha2_NnfLustreMGTSpec(in *v1al return nil } -// Convert_v1alpha3_NnfLustreMGTSpec_To_v1alpha2_NnfLustreMGTSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfLustreMGTSpec_To_v1alpha2_NnfLustreMGTSpec(in *v1alpha3.NnfLustreMGTSpec, out *NnfLustreMGTSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfLustreMGTSpec_To_v1alpha2_NnfLustreMGTSpec(in, out, s) +// Convert_v1alpha4_NnfLustreMGTSpec_To_v1alpha2_NnfLustreMGTSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfLustreMGTSpec_To_v1alpha2_NnfLustreMGTSpec(in *v1alpha4.NnfLustreMGTSpec, out *NnfLustreMGTSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfLustreMGTSpec_To_v1alpha2_NnfLustreMGTSpec(in, out, s) } -func autoConvert_v1alpha2_NnfLustreMGTStatus_To_v1alpha3_NnfLustreMGTStatus(in *NnfLustreMGTStatus, out *v1alpha3.NnfLustreMGTStatus, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfLustreMGTStatus_To_v1alpha4_NnfLustreMGTStatus(in *NnfLustreMGTStatus, out *v1alpha4.NnfLustreMGTStatus, s conversion.Scope) error { out.FsNameNext = in.FsNameNext - out.ClaimList = *(*[]v1alpha3.NnfLustreMGTStatusClaim)(unsafe.Pointer(&in.ClaimList)) + out.ClaimList = *(*[]v1alpha4.NnfLustreMGTStatusClaim)(unsafe.Pointer(&in.ClaimList)) out.ResourceError = in.ResourceError return nil } -// Convert_v1alpha2_NnfLustreMGTStatus_To_v1alpha3_NnfLustreMGTStatus is an autogenerated conversion function. -func Convert_v1alpha2_NnfLustreMGTStatus_To_v1alpha3_NnfLustreMGTStatus(in *NnfLustreMGTStatus, out *v1alpha3.NnfLustreMGTStatus, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfLustreMGTStatus_To_v1alpha3_NnfLustreMGTStatus(in, out, s) +// Convert_v1alpha2_NnfLustreMGTStatus_To_v1alpha4_NnfLustreMGTStatus is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfLustreMGTStatus_To_v1alpha4_NnfLustreMGTStatus(in *NnfLustreMGTStatus, out *v1alpha4.NnfLustreMGTStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfLustreMGTStatus_To_v1alpha4_NnfLustreMGTStatus(in, out, s) } -func autoConvert_v1alpha3_NnfLustreMGTStatus_To_v1alpha2_NnfLustreMGTStatus(in *v1alpha3.NnfLustreMGTStatus, out *NnfLustreMGTStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfLustreMGTStatus_To_v1alpha2_NnfLustreMGTStatus(in *v1alpha4.NnfLustreMGTStatus, out *NnfLustreMGTStatus, s conversion.Scope) error { out.FsNameNext = in.FsNameNext out.ClaimList = *(*[]NnfLustreMGTStatusClaim)(unsafe.Pointer(&in.ClaimList)) out.ResourceError = in.ResourceError return nil } -// Convert_v1alpha3_NnfLustreMGTStatus_To_v1alpha2_NnfLustreMGTStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfLustreMGTStatus_To_v1alpha2_NnfLustreMGTStatus(in *v1alpha3.NnfLustreMGTStatus, out *NnfLustreMGTStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfLustreMGTStatus_To_v1alpha2_NnfLustreMGTStatus(in, out, s) +// Convert_v1alpha4_NnfLustreMGTStatus_To_v1alpha2_NnfLustreMGTStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfLustreMGTStatus_To_v1alpha2_NnfLustreMGTStatus(in *v1alpha4.NnfLustreMGTStatus, out *NnfLustreMGTStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfLustreMGTStatus_To_v1alpha2_NnfLustreMGTStatus(in, out, s) } -func autoConvert_v1alpha2_NnfLustreMGTStatusClaim_To_v1alpha3_NnfLustreMGTStatusClaim(in *NnfLustreMGTStatusClaim, out *v1alpha3.NnfLustreMGTStatusClaim, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfLustreMGTStatusClaim_To_v1alpha4_NnfLustreMGTStatusClaim(in *NnfLustreMGTStatusClaim, out *v1alpha4.NnfLustreMGTStatusClaim, s conversion.Scope) error { out.Reference = in.Reference out.FsName = in.FsName return nil } -// Convert_v1alpha2_NnfLustreMGTStatusClaim_To_v1alpha3_NnfLustreMGTStatusClaim is an autogenerated conversion function. -func Convert_v1alpha2_NnfLustreMGTStatusClaim_To_v1alpha3_NnfLustreMGTStatusClaim(in *NnfLustreMGTStatusClaim, out *v1alpha3.NnfLustreMGTStatusClaim, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfLustreMGTStatusClaim_To_v1alpha3_NnfLustreMGTStatusClaim(in, out, s) +// Convert_v1alpha2_NnfLustreMGTStatusClaim_To_v1alpha4_NnfLustreMGTStatusClaim is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfLustreMGTStatusClaim_To_v1alpha4_NnfLustreMGTStatusClaim(in *NnfLustreMGTStatusClaim, out *v1alpha4.NnfLustreMGTStatusClaim, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfLustreMGTStatusClaim_To_v1alpha4_NnfLustreMGTStatusClaim(in, out, s) } -func autoConvert_v1alpha3_NnfLustreMGTStatusClaim_To_v1alpha2_NnfLustreMGTStatusClaim(in *v1alpha3.NnfLustreMGTStatusClaim, out *NnfLustreMGTStatusClaim, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfLustreMGTStatusClaim_To_v1alpha2_NnfLustreMGTStatusClaim(in *v1alpha4.NnfLustreMGTStatusClaim, out *NnfLustreMGTStatusClaim, s conversion.Scope) error { out.Reference = in.Reference out.FsName = in.FsName return nil } -// Convert_v1alpha3_NnfLustreMGTStatusClaim_To_v1alpha2_NnfLustreMGTStatusClaim is an autogenerated conversion function. -func Convert_v1alpha3_NnfLustreMGTStatusClaim_To_v1alpha2_NnfLustreMGTStatusClaim(in *v1alpha3.NnfLustreMGTStatusClaim, out *NnfLustreMGTStatusClaim, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfLustreMGTStatusClaim_To_v1alpha2_NnfLustreMGTStatusClaim(in, out, s) +// Convert_v1alpha4_NnfLustreMGTStatusClaim_To_v1alpha2_NnfLustreMGTStatusClaim is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfLustreMGTStatusClaim_To_v1alpha2_NnfLustreMGTStatusClaim(in *v1alpha4.NnfLustreMGTStatusClaim, out *NnfLustreMGTStatusClaim, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfLustreMGTStatusClaim_To_v1alpha2_NnfLustreMGTStatusClaim(in, out, s) } -func autoConvert_v1alpha2_NnfNode_To_v1alpha3_NnfNode(in *NnfNode, out *v1alpha3.NnfNode, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfNode_To_v1alpha4_NnfNode(in *NnfNode, out *v1alpha4.NnfNode, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha2_NnfNodeSpec_To_v1alpha3_NnfNodeSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha2_NnfNodeSpec_To_v1alpha4_NnfNodeSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha2_NnfNodeStatus_To_v1alpha3_NnfNodeStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha2_NnfNodeStatus_To_v1alpha4_NnfNodeStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha2_NnfNode_To_v1alpha3_NnfNode is an autogenerated conversion function. -func Convert_v1alpha2_NnfNode_To_v1alpha3_NnfNode(in *NnfNode, out *v1alpha3.NnfNode, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfNode_To_v1alpha3_NnfNode(in, out, s) +// Convert_v1alpha2_NnfNode_To_v1alpha4_NnfNode is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfNode_To_v1alpha4_NnfNode(in *NnfNode, out *v1alpha4.NnfNode, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNode_To_v1alpha4_NnfNode(in, out, s) } -func autoConvert_v1alpha3_NnfNode_To_v1alpha2_NnfNode(in *v1alpha3.NnfNode, out *NnfNode, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNode_To_v1alpha2_NnfNode(in *v1alpha4.NnfNode, out *NnfNode, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfNodeSpec_To_v1alpha2_NnfNodeSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha4_NnfNodeSpec_To_v1alpha2_NnfNodeSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha3_NnfNodeStatus_To_v1alpha2_NnfNodeStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha4_NnfNodeStatus_To_v1alpha2_NnfNodeStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfNode_To_v1alpha2_NnfNode is an autogenerated conversion function. -func Convert_v1alpha3_NnfNode_To_v1alpha2_NnfNode(in *v1alpha3.NnfNode, out *NnfNode, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNode_To_v1alpha2_NnfNode(in, out, s) +// Convert_v1alpha4_NnfNode_To_v1alpha2_NnfNode is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNode_To_v1alpha2_NnfNode(in *v1alpha4.NnfNode, out *NnfNode, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNode_To_v1alpha2_NnfNode(in, out, s) } -func autoConvert_v1alpha2_NnfNodeBlockStorage_To_v1alpha3_NnfNodeBlockStorage(in *NnfNodeBlockStorage, out *v1alpha3.NnfNodeBlockStorage, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfNodeBlockStorage_To_v1alpha4_NnfNodeBlockStorage(in *NnfNodeBlockStorage, out *v1alpha4.NnfNodeBlockStorage, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha2_NnfNodeBlockStorageSpec_To_v1alpha3_NnfNodeBlockStorageSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha2_NnfNodeBlockStorageSpec_To_v1alpha4_NnfNodeBlockStorageSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha2_NnfNodeBlockStorageStatus_To_v1alpha3_NnfNodeBlockStorageStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha2_NnfNodeBlockStorageStatus_To_v1alpha4_NnfNodeBlockStorageStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha2_NnfNodeBlockStorage_To_v1alpha3_NnfNodeBlockStorage is an autogenerated conversion function. -func Convert_v1alpha2_NnfNodeBlockStorage_To_v1alpha3_NnfNodeBlockStorage(in *NnfNodeBlockStorage, out *v1alpha3.NnfNodeBlockStorage, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfNodeBlockStorage_To_v1alpha3_NnfNodeBlockStorage(in, out, s) +// Convert_v1alpha2_NnfNodeBlockStorage_To_v1alpha4_NnfNodeBlockStorage is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfNodeBlockStorage_To_v1alpha4_NnfNodeBlockStorage(in *NnfNodeBlockStorage, out *v1alpha4.NnfNodeBlockStorage, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeBlockStorage_To_v1alpha4_NnfNodeBlockStorage(in, out, s) } -func autoConvert_v1alpha3_NnfNodeBlockStorage_To_v1alpha2_NnfNodeBlockStorage(in *v1alpha3.NnfNodeBlockStorage, out *NnfNodeBlockStorage, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeBlockStorage_To_v1alpha2_NnfNodeBlockStorage(in *v1alpha4.NnfNodeBlockStorage, out *NnfNodeBlockStorage, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfNodeBlockStorageSpec_To_v1alpha2_NnfNodeBlockStorageSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha4_NnfNodeBlockStorageSpec_To_v1alpha2_NnfNodeBlockStorageSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha3_NnfNodeBlockStorageStatus_To_v1alpha2_NnfNodeBlockStorageStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha4_NnfNodeBlockStorageStatus_To_v1alpha2_NnfNodeBlockStorageStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfNodeBlockStorage_To_v1alpha2_NnfNodeBlockStorage is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeBlockStorage_To_v1alpha2_NnfNodeBlockStorage(in *v1alpha3.NnfNodeBlockStorage, out *NnfNodeBlockStorage, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeBlockStorage_To_v1alpha2_NnfNodeBlockStorage(in, out, s) +// Convert_v1alpha4_NnfNodeBlockStorage_To_v1alpha2_NnfNodeBlockStorage is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeBlockStorage_To_v1alpha2_NnfNodeBlockStorage(in *v1alpha4.NnfNodeBlockStorage, out *NnfNodeBlockStorage, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeBlockStorage_To_v1alpha2_NnfNodeBlockStorage(in, out, s) } -func autoConvert_v1alpha2_NnfNodeBlockStorageAccessStatus_To_v1alpha3_NnfNodeBlockStorageAccessStatus(in *NnfNodeBlockStorageAccessStatus, out *v1alpha3.NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfNodeBlockStorageAccessStatus_To_v1alpha4_NnfNodeBlockStorageAccessStatus(in *NnfNodeBlockStorageAccessStatus, out *v1alpha4.NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { out.DevicePaths = *(*[]string)(unsafe.Pointer(&in.DevicePaths)) out.StorageGroupId = in.StorageGroupId return nil } -// Convert_v1alpha2_NnfNodeBlockStorageAccessStatus_To_v1alpha3_NnfNodeBlockStorageAccessStatus is an autogenerated conversion function. -func Convert_v1alpha2_NnfNodeBlockStorageAccessStatus_To_v1alpha3_NnfNodeBlockStorageAccessStatus(in *NnfNodeBlockStorageAccessStatus, out *v1alpha3.NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfNodeBlockStorageAccessStatus_To_v1alpha3_NnfNodeBlockStorageAccessStatus(in, out, s) +// Convert_v1alpha2_NnfNodeBlockStorageAccessStatus_To_v1alpha4_NnfNodeBlockStorageAccessStatus is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfNodeBlockStorageAccessStatus_To_v1alpha4_NnfNodeBlockStorageAccessStatus(in *NnfNodeBlockStorageAccessStatus, out *v1alpha4.NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeBlockStorageAccessStatus_To_v1alpha4_NnfNodeBlockStorageAccessStatus(in, out, s) } -func autoConvert_v1alpha3_NnfNodeBlockStorageAccessStatus_To_v1alpha2_NnfNodeBlockStorageAccessStatus(in *v1alpha3.NnfNodeBlockStorageAccessStatus, out *NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeBlockStorageAccessStatus_To_v1alpha2_NnfNodeBlockStorageAccessStatus(in *v1alpha4.NnfNodeBlockStorageAccessStatus, out *NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { out.DevicePaths = *(*[]string)(unsafe.Pointer(&in.DevicePaths)) out.StorageGroupId = in.StorageGroupId return nil } -// Convert_v1alpha3_NnfNodeBlockStorageAccessStatus_To_v1alpha2_NnfNodeBlockStorageAccessStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeBlockStorageAccessStatus_To_v1alpha2_NnfNodeBlockStorageAccessStatus(in *v1alpha3.NnfNodeBlockStorageAccessStatus, out *NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeBlockStorageAccessStatus_To_v1alpha2_NnfNodeBlockStorageAccessStatus(in, out, s) +// Convert_v1alpha4_NnfNodeBlockStorageAccessStatus_To_v1alpha2_NnfNodeBlockStorageAccessStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeBlockStorageAccessStatus_To_v1alpha2_NnfNodeBlockStorageAccessStatus(in *v1alpha4.NnfNodeBlockStorageAccessStatus, out *NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeBlockStorageAccessStatus_To_v1alpha2_NnfNodeBlockStorageAccessStatus(in, out, s) } -func autoConvert_v1alpha2_NnfNodeBlockStorageAllocationSpec_To_v1alpha3_NnfNodeBlockStorageAllocationSpec(in *NnfNodeBlockStorageAllocationSpec, out *v1alpha3.NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfNodeBlockStorageAllocationSpec_To_v1alpha4_NnfNodeBlockStorageAllocationSpec(in *NnfNodeBlockStorageAllocationSpec, out *v1alpha4.NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { out.Capacity = in.Capacity out.Access = *(*[]string)(unsafe.Pointer(&in.Access)) return nil } -// Convert_v1alpha2_NnfNodeBlockStorageAllocationSpec_To_v1alpha3_NnfNodeBlockStorageAllocationSpec is an autogenerated conversion function. -func Convert_v1alpha2_NnfNodeBlockStorageAllocationSpec_To_v1alpha3_NnfNodeBlockStorageAllocationSpec(in *NnfNodeBlockStorageAllocationSpec, out *v1alpha3.NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfNodeBlockStorageAllocationSpec_To_v1alpha3_NnfNodeBlockStorageAllocationSpec(in, out, s) +// Convert_v1alpha2_NnfNodeBlockStorageAllocationSpec_To_v1alpha4_NnfNodeBlockStorageAllocationSpec is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfNodeBlockStorageAllocationSpec_To_v1alpha4_NnfNodeBlockStorageAllocationSpec(in *NnfNodeBlockStorageAllocationSpec, out *v1alpha4.NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeBlockStorageAllocationSpec_To_v1alpha4_NnfNodeBlockStorageAllocationSpec(in, out, s) } -func autoConvert_v1alpha3_NnfNodeBlockStorageAllocationSpec_To_v1alpha2_NnfNodeBlockStorageAllocationSpec(in *v1alpha3.NnfNodeBlockStorageAllocationSpec, out *NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeBlockStorageAllocationSpec_To_v1alpha2_NnfNodeBlockStorageAllocationSpec(in *v1alpha4.NnfNodeBlockStorageAllocationSpec, out *NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { out.Capacity = in.Capacity out.Access = *(*[]string)(unsafe.Pointer(&in.Access)) return nil } -// Convert_v1alpha3_NnfNodeBlockStorageAllocationSpec_To_v1alpha2_NnfNodeBlockStorageAllocationSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeBlockStorageAllocationSpec_To_v1alpha2_NnfNodeBlockStorageAllocationSpec(in *v1alpha3.NnfNodeBlockStorageAllocationSpec, out *NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeBlockStorageAllocationSpec_To_v1alpha2_NnfNodeBlockStorageAllocationSpec(in, out, s) +// Convert_v1alpha4_NnfNodeBlockStorageAllocationSpec_To_v1alpha2_NnfNodeBlockStorageAllocationSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeBlockStorageAllocationSpec_To_v1alpha2_NnfNodeBlockStorageAllocationSpec(in *v1alpha4.NnfNodeBlockStorageAllocationSpec, out *NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeBlockStorageAllocationSpec_To_v1alpha2_NnfNodeBlockStorageAllocationSpec(in, out, s) } -func autoConvert_v1alpha2_NnfNodeBlockStorageAllocationStatus_To_v1alpha3_NnfNodeBlockStorageAllocationStatus(in *NnfNodeBlockStorageAllocationStatus, out *v1alpha3.NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { - out.Accesses = *(*map[string]v1alpha3.NnfNodeBlockStorageAccessStatus)(unsafe.Pointer(&in.Accesses)) - out.Devices = *(*[]v1alpha3.NnfNodeBlockStorageDeviceStatus)(unsafe.Pointer(&in.Devices)) +func autoConvert_v1alpha2_NnfNodeBlockStorageAllocationStatus_To_v1alpha4_NnfNodeBlockStorageAllocationStatus(in *NnfNodeBlockStorageAllocationStatus, out *v1alpha4.NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { + out.Accesses = *(*map[string]v1alpha4.NnfNodeBlockStorageAccessStatus)(unsafe.Pointer(&in.Accesses)) + out.Devices = *(*[]v1alpha4.NnfNodeBlockStorageDeviceStatus)(unsafe.Pointer(&in.Devices)) out.CapacityAllocated = in.CapacityAllocated out.StoragePoolId = in.StoragePoolId return nil } -// Convert_v1alpha2_NnfNodeBlockStorageAllocationStatus_To_v1alpha3_NnfNodeBlockStorageAllocationStatus is an autogenerated conversion function. -func Convert_v1alpha2_NnfNodeBlockStorageAllocationStatus_To_v1alpha3_NnfNodeBlockStorageAllocationStatus(in *NnfNodeBlockStorageAllocationStatus, out *v1alpha3.NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfNodeBlockStorageAllocationStatus_To_v1alpha3_NnfNodeBlockStorageAllocationStatus(in, out, s) +// Convert_v1alpha2_NnfNodeBlockStorageAllocationStatus_To_v1alpha4_NnfNodeBlockStorageAllocationStatus is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfNodeBlockStorageAllocationStatus_To_v1alpha4_NnfNodeBlockStorageAllocationStatus(in *NnfNodeBlockStorageAllocationStatus, out *v1alpha4.NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeBlockStorageAllocationStatus_To_v1alpha4_NnfNodeBlockStorageAllocationStatus(in, out, s) } -func autoConvert_v1alpha3_NnfNodeBlockStorageAllocationStatus_To_v1alpha2_NnfNodeBlockStorageAllocationStatus(in *v1alpha3.NnfNodeBlockStorageAllocationStatus, out *NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeBlockStorageAllocationStatus_To_v1alpha2_NnfNodeBlockStorageAllocationStatus(in *v1alpha4.NnfNodeBlockStorageAllocationStatus, out *NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { out.Accesses = *(*map[string]NnfNodeBlockStorageAccessStatus)(unsafe.Pointer(&in.Accesses)) out.Devices = *(*[]NnfNodeBlockStorageDeviceStatus)(unsafe.Pointer(&in.Devices)) out.CapacityAllocated = in.CapacityAllocated @@ -1827,93 +1827,93 @@ func autoConvert_v1alpha3_NnfNodeBlockStorageAllocationStatus_To_v1alpha2_NnfNod return nil } -// Convert_v1alpha3_NnfNodeBlockStorageAllocationStatus_To_v1alpha2_NnfNodeBlockStorageAllocationStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeBlockStorageAllocationStatus_To_v1alpha2_NnfNodeBlockStorageAllocationStatus(in *v1alpha3.NnfNodeBlockStorageAllocationStatus, out *NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeBlockStorageAllocationStatus_To_v1alpha2_NnfNodeBlockStorageAllocationStatus(in, out, s) +// Convert_v1alpha4_NnfNodeBlockStorageAllocationStatus_To_v1alpha2_NnfNodeBlockStorageAllocationStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeBlockStorageAllocationStatus_To_v1alpha2_NnfNodeBlockStorageAllocationStatus(in *v1alpha4.NnfNodeBlockStorageAllocationStatus, out *NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeBlockStorageAllocationStatus_To_v1alpha2_NnfNodeBlockStorageAllocationStatus(in, out, s) } -func autoConvert_v1alpha2_NnfNodeBlockStorageDeviceStatus_To_v1alpha3_NnfNodeBlockStorageDeviceStatus(in *NnfNodeBlockStorageDeviceStatus, out *v1alpha3.NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfNodeBlockStorageDeviceStatus_To_v1alpha4_NnfNodeBlockStorageDeviceStatus(in *NnfNodeBlockStorageDeviceStatus, out *v1alpha4.NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { out.NQN = in.NQN out.NamespaceId = in.NamespaceId out.CapacityAllocated = in.CapacityAllocated return nil } -// Convert_v1alpha2_NnfNodeBlockStorageDeviceStatus_To_v1alpha3_NnfNodeBlockStorageDeviceStatus is an autogenerated conversion function. -func Convert_v1alpha2_NnfNodeBlockStorageDeviceStatus_To_v1alpha3_NnfNodeBlockStorageDeviceStatus(in *NnfNodeBlockStorageDeviceStatus, out *v1alpha3.NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfNodeBlockStorageDeviceStatus_To_v1alpha3_NnfNodeBlockStorageDeviceStatus(in, out, s) +// Convert_v1alpha2_NnfNodeBlockStorageDeviceStatus_To_v1alpha4_NnfNodeBlockStorageDeviceStatus is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfNodeBlockStorageDeviceStatus_To_v1alpha4_NnfNodeBlockStorageDeviceStatus(in *NnfNodeBlockStorageDeviceStatus, out *v1alpha4.NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeBlockStorageDeviceStatus_To_v1alpha4_NnfNodeBlockStorageDeviceStatus(in, out, s) } -func autoConvert_v1alpha3_NnfNodeBlockStorageDeviceStatus_To_v1alpha2_NnfNodeBlockStorageDeviceStatus(in *v1alpha3.NnfNodeBlockStorageDeviceStatus, out *NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeBlockStorageDeviceStatus_To_v1alpha2_NnfNodeBlockStorageDeviceStatus(in *v1alpha4.NnfNodeBlockStorageDeviceStatus, out *NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { out.NQN = in.NQN out.NamespaceId = in.NamespaceId out.CapacityAllocated = in.CapacityAllocated return nil } -// Convert_v1alpha3_NnfNodeBlockStorageDeviceStatus_To_v1alpha2_NnfNodeBlockStorageDeviceStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeBlockStorageDeviceStatus_To_v1alpha2_NnfNodeBlockStorageDeviceStatus(in *v1alpha3.NnfNodeBlockStorageDeviceStatus, out *NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeBlockStorageDeviceStatus_To_v1alpha2_NnfNodeBlockStorageDeviceStatus(in, out, s) +// Convert_v1alpha4_NnfNodeBlockStorageDeviceStatus_To_v1alpha2_NnfNodeBlockStorageDeviceStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeBlockStorageDeviceStatus_To_v1alpha2_NnfNodeBlockStorageDeviceStatus(in *v1alpha4.NnfNodeBlockStorageDeviceStatus, out *NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeBlockStorageDeviceStatus_To_v1alpha2_NnfNodeBlockStorageDeviceStatus(in, out, s) } -func autoConvert_v1alpha2_NnfNodeBlockStorageList_To_v1alpha3_NnfNodeBlockStorageList(in *NnfNodeBlockStorageList, out *v1alpha3.NnfNodeBlockStorageList, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfNodeBlockStorageList_To_v1alpha4_NnfNodeBlockStorageList(in *NnfNodeBlockStorageList, out *v1alpha4.NnfNodeBlockStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha3.NnfNodeBlockStorage)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]v1alpha4.NnfNodeBlockStorage)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha2_NnfNodeBlockStorageList_To_v1alpha3_NnfNodeBlockStorageList is an autogenerated conversion function. -func Convert_v1alpha2_NnfNodeBlockStorageList_To_v1alpha3_NnfNodeBlockStorageList(in *NnfNodeBlockStorageList, out *v1alpha3.NnfNodeBlockStorageList, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfNodeBlockStorageList_To_v1alpha3_NnfNodeBlockStorageList(in, out, s) +// Convert_v1alpha2_NnfNodeBlockStorageList_To_v1alpha4_NnfNodeBlockStorageList is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfNodeBlockStorageList_To_v1alpha4_NnfNodeBlockStorageList(in *NnfNodeBlockStorageList, out *v1alpha4.NnfNodeBlockStorageList, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeBlockStorageList_To_v1alpha4_NnfNodeBlockStorageList(in, out, s) } -func autoConvert_v1alpha3_NnfNodeBlockStorageList_To_v1alpha2_NnfNodeBlockStorageList(in *v1alpha3.NnfNodeBlockStorageList, out *NnfNodeBlockStorageList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeBlockStorageList_To_v1alpha2_NnfNodeBlockStorageList(in *v1alpha4.NnfNodeBlockStorageList, out *NnfNodeBlockStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]NnfNodeBlockStorage)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha3_NnfNodeBlockStorageList_To_v1alpha2_NnfNodeBlockStorageList is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeBlockStorageList_To_v1alpha2_NnfNodeBlockStorageList(in *v1alpha3.NnfNodeBlockStorageList, out *NnfNodeBlockStorageList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeBlockStorageList_To_v1alpha2_NnfNodeBlockStorageList(in, out, s) +// Convert_v1alpha4_NnfNodeBlockStorageList_To_v1alpha2_NnfNodeBlockStorageList is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeBlockStorageList_To_v1alpha2_NnfNodeBlockStorageList(in *v1alpha4.NnfNodeBlockStorageList, out *NnfNodeBlockStorageList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeBlockStorageList_To_v1alpha2_NnfNodeBlockStorageList(in, out, s) } -func autoConvert_v1alpha2_NnfNodeBlockStorageSpec_To_v1alpha3_NnfNodeBlockStorageSpec(in *NnfNodeBlockStorageSpec, out *v1alpha3.NnfNodeBlockStorageSpec, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfNodeBlockStorageSpec_To_v1alpha4_NnfNodeBlockStorageSpec(in *NnfNodeBlockStorageSpec, out *v1alpha4.NnfNodeBlockStorageSpec, s conversion.Scope) error { out.SharedAllocation = in.SharedAllocation - out.Allocations = *(*[]v1alpha3.NnfNodeBlockStorageAllocationSpec)(unsafe.Pointer(&in.Allocations)) + out.Allocations = *(*[]v1alpha4.NnfNodeBlockStorageAllocationSpec)(unsafe.Pointer(&in.Allocations)) return nil } -// Convert_v1alpha2_NnfNodeBlockStorageSpec_To_v1alpha3_NnfNodeBlockStorageSpec is an autogenerated conversion function. -func Convert_v1alpha2_NnfNodeBlockStorageSpec_To_v1alpha3_NnfNodeBlockStorageSpec(in *NnfNodeBlockStorageSpec, out *v1alpha3.NnfNodeBlockStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfNodeBlockStorageSpec_To_v1alpha3_NnfNodeBlockStorageSpec(in, out, s) +// Convert_v1alpha2_NnfNodeBlockStorageSpec_To_v1alpha4_NnfNodeBlockStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfNodeBlockStorageSpec_To_v1alpha4_NnfNodeBlockStorageSpec(in *NnfNodeBlockStorageSpec, out *v1alpha4.NnfNodeBlockStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeBlockStorageSpec_To_v1alpha4_NnfNodeBlockStorageSpec(in, out, s) } -func autoConvert_v1alpha3_NnfNodeBlockStorageSpec_To_v1alpha2_NnfNodeBlockStorageSpec(in *v1alpha3.NnfNodeBlockStorageSpec, out *NnfNodeBlockStorageSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeBlockStorageSpec_To_v1alpha2_NnfNodeBlockStorageSpec(in *v1alpha4.NnfNodeBlockStorageSpec, out *NnfNodeBlockStorageSpec, s conversion.Scope) error { out.SharedAllocation = in.SharedAllocation out.Allocations = *(*[]NnfNodeBlockStorageAllocationSpec)(unsafe.Pointer(&in.Allocations)) return nil } -// Convert_v1alpha3_NnfNodeBlockStorageSpec_To_v1alpha2_NnfNodeBlockStorageSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeBlockStorageSpec_To_v1alpha2_NnfNodeBlockStorageSpec(in *v1alpha3.NnfNodeBlockStorageSpec, out *NnfNodeBlockStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeBlockStorageSpec_To_v1alpha2_NnfNodeBlockStorageSpec(in, out, s) +// Convert_v1alpha4_NnfNodeBlockStorageSpec_To_v1alpha2_NnfNodeBlockStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeBlockStorageSpec_To_v1alpha2_NnfNodeBlockStorageSpec(in *v1alpha4.NnfNodeBlockStorageSpec, out *NnfNodeBlockStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeBlockStorageSpec_To_v1alpha2_NnfNodeBlockStorageSpec(in, out, s) } -func autoConvert_v1alpha2_NnfNodeBlockStorageStatus_To_v1alpha3_NnfNodeBlockStorageStatus(in *NnfNodeBlockStorageStatus, out *v1alpha3.NnfNodeBlockStorageStatus, s conversion.Scope) error { - out.Allocations = *(*[]v1alpha3.NnfNodeBlockStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) +func autoConvert_v1alpha2_NnfNodeBlockStorageStatus_To_v1alpha4_NnfNodeBlockStorageStatus(in *NnfNodeBlockStorageStatus, out *v1alpha4.NnfNodeBlockStorageStatus, s conversion.Scope) error { + out.Allocations = *(*[]v1alpha4.NnfNodeBlockStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) out.ResourceError = in.ResourceError out.PodStartTime = in.PodStartTime out.Ready = in.Ready return nil } -// Convert_v1alpha2_NnfNodeBlockStorageStatus_To_v1alpha3_NnfNodeBlockStorageStatus is an autogenerated conversion function. -func Convert_v1alpha2_NnfNodeBlockStorageStatus_To_v1alpha3_NnfNodeBlockStorageStatus(in *NnfNodeBlockStorageStatus, out *v1alpha3.NnfNodeBlockStorageStatus, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfNodeBlockStorageStatus_To_v1alpha3_NnfNodeBlockStorageStatus(in, out, s) +// Convert_v1alpha2_NnfNodeBlockStorageStatus_To_v1alpha4_NnfNodeBlockStorageStatus is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfNodeBlockStorageStatus_To_v1alpha4_NnfNodeBlockStorageStatus(in *NnfNodeBlockStorageStatus, out *v1alpha4.NnfNodeBlockStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeBlockStorageStatus_To_v1alpha4_NnfNodeBlockStorageStatus(in, out, s) } -func autoConvert_v1alpha3_NnfNodeBlockStorageStatus_To_v1alpha2_NnfNodeBlockStorageStatus(in *v1alpha3.NnfNodeBlockStorageStatus, out *NnfNodeBlockStorageStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeBlockStorageStatus_To_v1alpha2_NnfNodeBlockStorageStatus(in *v1alpha4.NnfNodeBlockStorageStatus, out *NnfNodeBlockStorageStatus, s conversion.Scope) error { out.Allocations = *(*[]NnfNodeBlockStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) out.ResourceError = in.ResourceError out.PodStartTime = in.PodStartTime @@ -1921,167 +1921,167 @@ func autoConvert_v1alpha3_NnfNodeBlockStorageStatus_To_v1alpha2_NnfNodeBlockStor return nil } -// Convert_v1alpha3_NnfNodeBlockStorageStatus_To_v1alpha2_NnfNodeBlockStorageStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeBlockStorageStatus_To_v1alpha2_NnfNodeBlockStorageStatus(in *v1alpha3.NnfNodeBlockStorageStatus, out *NnfNodeBlockStorageStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeBlockStorageStatus_To_v1alpha2_NnfNodeBlockStorageStatus(in, out, s) +// Convert_v1alpha4_NnfNodeBlockStorageStatus_To_v1alpha2_NnfNodeBlockStorageStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeBlockStorageStatus_To_v1alpha2_NnfNodeBlockStorageStatus(in *v1alpha4.NnfNodeBlockStorageStatus, out *NnfNodeBlockStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeBlockStorageStatus_To_v1alpha2_NnfNodeBlockStorageStatus(in, out, s) } -func autoConvert_v1alpha2_NnfNodeECData_To_v1alpha3_NnfNodeECData(in *NnfNodeECData, out *v1alpha3.NnfNodeECData, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfNodeECData_To_v1alpha4_NnfNodeECData(in *NnfNodeECData, out *v1alpha4.NnfNodeECData, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha2_NnfNodeECDataSpec_To_v1alpha3_NnfNodeECDataSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha2_NnfNodeECDataSpec_To_v1alpha4_NnfNodeECDataSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha2_NnfNodeECDataStatus_To_v1alpha3_NnfNodeECDataStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha2_NnfNodeECDataStatus_To_v1alpha4_NnfNodeECDataStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha2_NnfNodeECData_To_v1alpha3_NnfNodeECData is an autogenerated conversion function. -func Convert_v1alpha2_NnfNodeECData_To_v1alpha3_NnfNodeECData(in *NnfNodeECData, out *v1alpha3.NnfNodeECData, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfNodeECData_To_v1alpha3_NnfNodeECData(in, out, s) +// Convert_v1alpha2_NnfNodeECData_To_v1alpha4_NnfNodeECData is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfNodeECData_To_v1alpha4_NnfNodeECData(in *NnfNodeECData, out *v1alpha4.NnfNodeECData, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeECData_To_v1alpha4_NnfNodeECData(in, out, s) } -func autoConvert_v1alpha3_NnfNodeECData_To_v1alpha2_NnfNodeECData(in *v1alpha3.NnfNodeECData, out *NnfNodeECData, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeECData_To_v1alpha2_NnfNodeECData(in *v1alpha4.NnfNodeECData, out *NnfNodeECData, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfNodeECDataSpec_To_v1alpha2_NnfNodeECDataSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha4_NnfNodeECDataSpec_To_v1alpha2_NnfNodeECDataSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha3_NnfNodeECDataStatus_To_v1alpha2_NnfNodeECDataStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha4_NnfNodeECDataStatus_To_v1alpha2_NnfNodeECDataStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfNodeECData_To_v1alpha2_NnfNodeECData is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeECData_To_v1alpha2_NnfNodeECData(in *v1alpha3.NnfNodeECData, out *NnfNodeECData, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeECData_To_v1alpha2_NnfNodeECData(in, out, s) +// Convert_v1alpha4_NnfNodeECData_To_v1alpha2_NnfNodeECData is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeECData_To_v1alpha2_NnfNodeECData(in *v1alpha4.NnfNodeECData, out *NnfNodeECData, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeECData_To_v1alpha2_NnfNodeECData(in, out, s) } -func autoConvert_v1alpha2_NnfNodeECDataList_To_v1alpha3_NnfNodeECDataList(in *NnfNodeECDataList, out *v1alpha3.NnfNodeECDataList, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfNodeECDataList_To_v1alpha4_NnfNodeECDataList(in *NnfNodeECDataList, out *v1alpha4.NnfNodeECDataList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha3.NnfNodeECData)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]v1alpha4.NnfNodeECData)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha2_NnfNodeECDataList_To_v1alpha3_NnfNodeECDataList is an autogenerated conversion function. -func Convert_v1alpha2_NnfNodeECDataList_To_v1alpha3_NnfNodeECDataList(in *NnfNodeECDataList, out *v1alpha3.NnfNodeECDataList, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfNodeECDataList_To_v1alpha3_NnfNodeECDataList(in, out, s) +// Convert_v1alpha2_NnfNodeECDataList_To_v1alpha4_NnfNodeECDataList is an autogenerated conversion function. +func Convert_v1alpha2_NnfNodeECDataList_To_v1alpha4_NnfNodeECDataList(in *NnfNodeECDataList, out *v1alpha4.NnfNodeECDataList, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeECDataList_To_v1alpha4_NnfNodeECDataList(in, out, s) } -func autoConvert_v1alpha3_NnfNodeECDataList_To_v1alpha2_NnfNodeECDataList(in *v1alpha3.NnfNodeECDataList, out *NnfNodeECDataList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeECDataList_To_v1alpha2_NnfNodeECDataList(in *v1alpha4.NnfNodeECDataList, out *NnfNodeECDataList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]NnfNodeECData)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha3_NnfNodeECDataList_To_v1alpha2_NnfNodeECDataList is an autogenerated conversion function. 
-func Convert_v1alpha3_NnfNodeECDataList_To_v1alpha2_NnfNodeECDataList(in *v1alpha3.NnfNodeECDataList, out *NnfNodeECDataList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeECDataList_To_v1alpha2_NnfNodeECDataList(in, out, s) +// Convert_v1alpha4_NnfNodeECDataList_To_v1alpha2_NnfNodeECDataList is an autogenerated conversion function. +func Convert_v1alpha4_NnfNodeECDataList_To_v1alpha2_NnfNodeECDataList(in *v1alpha4.NnfNodeECDataList, out *NnfNodeECDataList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeECDataList_To_v1alpha2_NnfNodeECDataList(in, out, s) } -func autoConvert_v1alpha2_NnfNodeECDataSpec_To_v1alpha3_NnfNodeECDataSpec(in *NnfNodeECDataSpec, out *v1alpha3.NnfNodeECDataSpec, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfNodeECDataSpec_To_v1alpha4_NnfNodeECDataSpec(in *NnfNodeECDataSpec, out *v1alpha4.NnfNodeECDataSpec, s conversion.Scope) error { return nil } -// Convert_v1alpha2_NnfNodeECDataSpec_To_v1alpha3_NnfNodeECDataSpec is an autogenerated conversion function. -func Convert_v1alpha2_NnfNodeECDataSpec_To_v1alpha3_NnfNodeECDataSpec(in *NnfNodeECDataSpec, out *v1alpha3.NnfNodeECDataSpec, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfNodeECDataSpec_To_v1alpha3_NnfNodeECDataSpec(in, out, s) +// Convert_v1alpha2_NnfNodeECDataSpec_To_v1alpha4_NnfNodeECDataSpec is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfNodeECDataSpec_To_v1alpha4_NnfNodeECDataSpec(in *NnfNodeECDataSpec, out *v1alpha4.NnfNodeECDataSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeECDataSpec_To_v1alpha4_NnfNodeECDataSpec(in, out, s) } -func autoConvert_v1alpha3_NnfNodeECDataSpec_To_v1alpha2_NnfNodeECDataSpec(in *v1alpha3.NnfNodeECDataSpec, out *NnfNodeECDataSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeECDataSpec_To_v1alpha2_NnfNodeECDataSpec(in *v1alpha4.NnfNodeECDataSpec, out *NnfNodeECDataSpec, s conversion.Scope) error { return nil } -// Convert_v1alpha3_NnfNodeECDataSpec_To_v1alpha2_NnfNodeECDataSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeECDataSpec_To_v1alpha2_NnfNodeECDataSpec(in *v1alpha3.NnfNodeECDataSpec, out *NnfNodeECDataSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeECDataSpec_To_v1alpha2_NnfNodeECDataSpec(in, out, s) +// Convert_v1alpha4_NnfNodeECDataSpec_To_v1alpha2_NnfNodeECDataSpec is an autogenerated conversion function. +func Convert_v1alpha4_NnfNodeECDataSpec_To_v1alpha2_NnfNodeECDataSpec(in *v1alpha4.NnfNodeECDataSpec, out *NnfNodeECDataSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeECDataSpec_To_v1alpha2_NnfNodeECDataSpec(in, out, s) } -func autoConvert_v1alpha2_NnfNodeECDataStatus_To_v1alpha3_NnfNodeECDataStatus(in *NnfNodeECDataStatus, out *v1alpha3.NnfNodeECDataStatus, s conversion.Scope) error { - out.Data = *(*map[string]v1alpha3.NnfNodeECPrivateData)(unsafe.Pointer(&in.Data)) +func autoConvert_v1alpha2_NnfNodeECDataStatus_To_v1alpha4_NnfNodeECDataStatus(in *NnfNodeECDataStatus, out *v1alpha4.NnfNodeECDataStatus, s conversion.Scope) error { + out.Data = *(*map[string]v1alpha4.NnfNodeECPrivateData)(unsafe.Pointer(&in.Data)) return nil } -// Convert_v1alpha2_NnfNodeECDataStatus_To_v1alpha3_NnfNodeECDataStatus is an autogenerated conversion function. 
-func Convert_v1alpha2_NnfNodeECDataStatus_To_v1alpha3_NnfNodeECDataStatus(in *NnfNodeECDataStatus, out *v1alpha3.NnfNodeECDataStatus, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfNodeECDataStatus_To_v1alpha3_NnfNodeECDataStatus(in, out, s) +// Convert_v1alpha2_NnfNodeECDataStatus_To_v1alpha4_NnfNodeECDataStatus is an autogenerated conversion function. +func Convert_v1alpha2_NnfNodeECDataStatus_To_v1alpha4_NnfNodeECDataStatus(in *NnfNodeECDataStatus, out *v1alpha4.NnfNodeECDataStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeECDataStatus_To_v1alpha4_NnfNodeECDataStatus(in, out, s) } -func autoConvert_v1alpha3_NnfNodeECDataStatus_To_v1alpha2_NnfNodeECDataStatus(in *v1alpha3.NnfNodeECDataStatus, out *NnfNodeECDataStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeECDataStatus_To_v1alpha2_NnfNodeECDataStatus(in *v1alpha4.NnfNodeECDataStatus, out *NnfNodeECDataStatus, s conversion.Scope) error { out.Data = *(*map[string]NnfNodeECPrivateData)(unsafe.Pointer(&in.Data)) return nil } -// Convert_v1alpha3_NnfNodeECDataStatus_To_v1alpha2_NnfNodeECDataStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeECDataStatus_To_v1alpha2_NnfNodeECDataStatus(in *v1alpha3.NnfNodeECDataStatus, out *NnfNodeECDataStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeECDataStatus_To_v1alpha2_NnfNodeECDataStatus(in, out, s) +// Convert_v1alpha4_NnfNodeECDataStatus_To_v1alpha2_NnfNodeECDataStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeECDataStatus_To_v1alpha2_NnfNodeECDataStatus(in *v1alpha4.NnfNodeECDataStatus, out *NnfNodeECDataStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeECDataStatus_To_v1alpha2_NnfNodeECDataStatus(in, out, s) } -func autoConvert_v1alpha2_NnfNodeList_To_v1alpha3_NnfNodeList(in *NnfNodeList, out *v1alpha3.NnfNodeList, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfNodeList_To_v1alpha4_NnfNodeList(in *NnfNodeList, out *v1alpha4.NnfNodeList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha3.NnfNode)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]v1alpha4.NnfNode)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha2_NnfNodeList_To_v1alpha3_NnfNodeList is an autogenerated conversion function. -func Convert_v1alpha2_NnfNodeList_To_v1alpha3_NnfNodeList(in *NnfNodeList, out *v1alpha3.NnfNodeList, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfNodeList_To_v1alpha3_NnfNodeList(in, out, s) +// Convert_v1alpha2_NnfNodeList_To_v1alpha4_NnfNodeList is an autogenerated conversion function. +func Convert_v1alpha2_NnfNodeList_To_v1alpha4_NnfNodeList(in *NnfNodeList, out *v1alpha4.NnfNodeList, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeList_To_v1alpha4_NnfNodeList(in, out, s) } -func autoConvert_v1alpha3_NnfNodeList_To_v1alpha2_NnfNodeList(in *v1alpha3.NnfNodeList, out *NnfNodeList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeList_To_v1alpha2_NnfNodeList(in *v1alpha4.NnfNodeList, out *NnfNodeList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]NnfNode)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha3_NnfNodeList_To_v1alpha2_NnfNodeList is an autogenerated conversion function. 
-func Convert_v1alpha3_NnfNodeList_To_v1alpha2_NnfNodeList(in *v1alpha3.NnfNodeList, out *NnfNodeList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeList_To_v1alpha2_NnfNodeList(in, out, s) +// Convert_v1alpha4_NnfNodeList_To_v1alpha2_NnfNodeList is an autogenerated conversion function. +func Convert_v1alpha4_NnfNodeList_To_v1alpha2_NnfNodeList(in *v1alpha4.NnfNodeList, out *NnfNodeList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeList_To_v1alpha2_NnfNodeList(in, out, s) } -func autoConvert_v1alpha2_NnfNodeSpec_To_v1alpha3_NnfNodeSpec(in *NnfNodeSpec, out *v1alpha3.NnfNodeSpec, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfNodeSpec_To_v1alpha4_NnfNodeSpec(in *NnfNodeSpec, out *v1alpha4.NnfNodeSpec, s conversion.Scope) error { out.Name = in.Name out.Pod = in.Pod - out.State = v1alpha3.NnfResourceStateType(in.State) + out.State = v1alpha4.NnfResourceStateType(in.State) return nil } -// Convert_v1alpha2_NnfNodeSpec_To_v1alpha3_NnfNodeSpec is an autogenerated conversion function. -func Convert_v1alpha2_NnfNodeSpec_To_v1alpha3_NnfNodeSpec(in *NnfNodeSpec, out *v1alpha3.NnfNodeSpec, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfNodeSpec_To_v1alpha3_NnfNodeSpec(in, out, s) +// Convert_v1alpha2_NnfNodeSpec_To_v1alpha4_NnfNodeSpec is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfNodeSpec_To_v1alpha4_NnfNodeSpec(in *NnfNodeSpec, out *v1alpha4.NnfNodeSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeSpec_To_v1alpha4_NnfNodeSpec(in, out, s) } -func autoConvert_v1alpha3_NnfNodeSpec_To_v1alpha2_NnfNodeSpec(in *v1alpha3.NnfNodeSpec, out *NnfNodeSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeSpec_To_v1alpha2_NnfNodeSpec(in *v1alpha4.NnfNodeSpec, out *NnfNodeSpec, s conversion.Scope) error { out.Name = in.Name out.Pod = in.Pod out.State = NnfResourceStateType(in.State) return nil } -// Convert_v1alpha3_NnfNodeSpec_To_v1alpha2_NnfNodeSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeSpec_To_v1alpha2_NnfNodeSpec(in *v1alpha3.NnfNodeSpec, out *NnfNodeSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeSpec_To_v1alpha2_NnfNodeSpec(in, out, s) +// Convert_v1alpha4_NnfNodeSpec_To_v1alpha2_NnfNodeSpec is an autogenerated conversion function. +func Convert_v1alpha4_NnfNodeSpec_To_v1alpha2_NnfNodeSpec(in *v1alpha4.NnfNodeSpec, out *NnfNodeSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeSpec_To_v1alpha2_NnfNodeSpec(in, out, s) } -func autoConvert_v1alpha2_NnfNodeStatus_To_v1alpha3_NnfNodeStatus(in *NnfNodeStatus, out *v1alpha3.NnfNodeStatus, s conversion.Scope) error { - out.Status = v1alpha3.NnfResourceStatusType(in.Status) - out.Health = v1alpha3.NnfResourceHealthType(in.Health) +func autoConvert_v1alpha2_NnfNodeStatus_To_v1alpha4_NnfNodeStatus(in *NnfNodeStatus, out *v1alpha4.NnfNodeStatus, s conversion.Scope) error { + out.Status = v1alpha4.NnfResourceStatusType(in.Status) + out.Health = v1alpha4.NnfResourceHealthType(in.Health) out.Fenced = in.Fenced out.LNetNid = in.LNetNid out.Capacity = in.Capacity out.CapacityAllocated = in.CapacityAllocated - out.Servers = *(*[]v1alpha3.NnfServerStatus)(unsafe.Pointer(&in.Servers)) - out.Drives = *(*[]v1alpha3.NnfDriveStatus)(unsafe.Pointer(&in.Drives)) + out.Servers = 
*(*[]v1alpha4.NnfServerStatus)(unsafe.Pointer(&in.Servers)) + out.Drives = *(*[]v1alpha4.NnfDriveStatus)(unsafe.Pointer(&in.Drives)) return nil } -// Convert_v1alpha2_NnfNodeStatus_To_v1alpha3_NnfNodeStatus is an autogenerated conversion function. -func Convert_v1alpha2_NnfNodeStatus_To_v1alpha3_NnfNodeStatus(in *NnfNodeStatus, out *v1alpha3.NnfNodeStatus, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfNodeStatus_To_v1alpha3_NnfNodeStatus(in, out, s) +// Convert_v1alpha2_NnfNodeStatus_To_v1alpha4_NnfNodeStatus is an autogenerated conversion function. +func Convert_v1alpha2_NnfNodeStatus_To_v1alpha4_NnfNodeStatus(in *NnfNodeStatus, out *v1alpha4.NnfNodeStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeStatus_To_v1alpha4_NnfNodeStatus(in, out, s) } -func autoConvert_v1alpha3_NnfNodeStatus_To_v1alpha2_NnfNodeStatus(in *v1alpha3.NnfNodeStatus, out *NnfNodeStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeStatus_To_v1alpha2_NnfNodeStatus(in *v1alpha4.NnfNodeStatus, out *NnfNodeStatus, s conversion.Scope) error { out.Status = NnfResourceStatusType(in.Status) out.Health = NnfResourceHealthType(in.Health) out.Fenced = in.Fenced @@ -2093,219 +2093,219 @@ func autoConvert_v1alpha3_NnfNodeStatus_To_v1alpha2_NnfNodeStatus(in *v1alpha3.N return nil } -// Convert_v1alpha3_NnfNodeStatus_To_v1alpha2_NnfNodeStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeStatus_To_v1alpha2_NnfNodeStatus(in *v1alpha3.NnfNodeStatus, out *NnfNodeStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeStatus_To_v1alpha2_NnfNodeStatus(in, out, s) +// Convert_v1alpha4_NnfNodeStatus_To_v1alpha2_NnfNodeStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeStatus_To_v1alpha2_NnfNodeStatus(in *v1alpha4.NnfNodeStatus, out *NnfNodeStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeStatus_To_v1alpha2_NnfNodeStatus(in, out, s) } -func autoConvert_v1alpha2_NnfNodeStorage_To_v1alpha3_NnfNodeStorage(in *NnfNodeStorage, out *v1alpha3.NnfNodeStorage, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfNodeStorage_To_v1alpha4_NnfNodeStorage(in *NnfNodeStorage, out *v1alpha4.NnfNodeStorage, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha2_NnfNodeStorageSpec_To_v1alpha3_NnfNodeStorageSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha2_NnfNodeStorageSpec_To_v1alpha4_NnfNodeStorageSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha2_NnfNodeStorageStatus_To_v1alpha3_NnfNodeStorageStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha2_NnfNodeStorageStatus_To_v1alpha4_NnfNodeStorageStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha2_NnfNodeStorage_To_v1alpha3_NnfNodeStorage is an autogenerated conversion function. -func Convert_v1alpha2_NnfNodeStorage_To_v1alpha3_NnfNodeStorage(in *NnfNodeStorage, out *v1alpha3.NnfNodeStorage, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfNodeStorage_To_v1alpha3_NnfNodeStorage(in, out, s) +// Convert_v1alpha2_NnfNodeStorage_To_v1alpha4_NnfNodeStorage is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfNodeStorage_To_v1alpha4_NnfNodeStorage(in *NnfNodeStorage, out *v1alpha4.NnfNodeStorage, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeStorage_To_v1alpha4_NnfNodeStorage(in, out, s) } -func autoConvert_v1alpha3_NnfNodeStorage_To_v1alpha2_NnfNodeStorage(in *v1alpha3.NnfNodeStorage, out *NnfNodeStorage, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeStorage_To_v1alpha2_NnfNodeStorage(in *v1alpha4.NnfNodeStorage, out *NnfNodeStorage, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfNodeStorageSpec_To_v1alpha2_NnfNodeStorageSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha4_NnfNodeStorageSpec_To_v1alpha2_NnfNodeStorageSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha3_NnfNodeStorageStatus_To_v1alpha2_NnfNodeStorageStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha4_NnfNodeStorageStatus_To_v1alpha2_NnfNodeStorageStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfNodeStorage_To_v1alpha2_NnfNodeStorage is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeStorage_To_v1alpha2_NnfNodeStorage(in *v1alpha3.NnfNodeStorage, out *NnfNodeStorage, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeStorage_To_v1alpha2_NnfNodeStorage(in, out, s) +// Convert_v1alpha4_NnfNodeStorage_To_v1alpha2_NnfNodeStorage is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeStorage_To_v1alpha2_NnfNodeStorage(in *v1alpha4.NnfNodeStorage, out *NnfNodeStorage, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeStorage_To_v1alpha2_NnfNodeStorage(in, out, s) } -func autoConvert_v1alpha2_NnfNodeStorageAllocationStatus_To_v1alpha3_NnfNodeStorageAllocationStatus(in *NnfNodeStorageAllocationStatus, out *v1alpha3.NnfNodeStorageAllocationStatus, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfNodeStorageAllocationStatus_To_v1alpha4_NnfNodeStorageAllocationStatus(in *NnfNodeStorageAllocationStatus, out *v1alpha4.NnfNodeStorageAllocationStatus, s conversion.Scope) error { out.VolumeGroup = in.VolumeGroup out.LogicalVolume = in.LogicalVolume out.Ready = in.Ready return nil } -// Convert_v1alpha2_NnfNodeStorageAllocationStatus_To_v1alpha3_NnfNodeStorageAllocationStatus is an autogenerated conversion function. -func Convert_v1alpha2_NnfNodeStorageAllocationStatus_To_v1alpha3_NnfNodeStorageAllocationStatus(in *NnfNodeStorageAllocationStatus, out *v1alpha3.NnfNodeStorageAllocationStatus, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfNodeStorageAllocationStatus_To_v1alpha3_NnfNodeStorageAllocationStatus(in, out, s) +// Convert_v1alpha2_NnfNodeStorageAllocationStatus_To_v1alpha4_NnfNodeStorageAllocationStatus is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfNodeStorageAllocationStatus_To_v1alpha4_NnfNodeStorageAllocationStatus(in *NnfNodeStorageAllocationStatus, out *v1alpha4.NnfNodeStorageAllocationStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeStorageAllocationStatus_To_v1alpha4_NnfNodeStorageAllocationStatus(in, out, s) } -func autoConvert_v1alpha3_NnfNodeStorageAllocationStatus_To_v1alpha2_NnfNodeStorageAllocationStatus(in *v1alpha3.NnfNodeStorageAllocationStatus, out *NnfNodeStorageAllocationStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeStorageAllocationStatus_To_v1alpha2_NnfNodeStorageAllocationStatus(in *v1alpha4.NnfNodeStorageAllocationStatus, out *NnfNodeStorageAllocationStatus, s conversion.Scope) error { out.VolumeGroup = in.VolumeGroup out.LogicalVolume = in.LogicalVolume out.Ready = in.Ready return nil } -// Convert_v1alpha3_NnfNodeStorageAllocationStatus_To_v1alpha2_NnfNodeStorageAllocationStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeStorageAllocationStatus_To_v1alpha2_NnfNodeStorageAllocationStatus(in *v1alpha3.NnfNodeStorageAllocationStatus, out *NnfNodeStorageAllocationStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeStorageAllocationStatus_To_v1alpha2_NnfNodeStorageAllocationStatus(in, out, s) +// Convert_v1alpha4_NnfNodeStorageAllocationStatus_To_v1alpha2_NnfNodeStorageAllocationStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeStorageAllocationStatus_To_v1alpha2_NnfNodeStorageAllocationStatus(in *v1alpha4.NnfNodeStorageAllocationStatus, out *NnfNodeStorageAllocationStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeStorageAllocationStatus_To_v1alpha2_NnfNodeStorageAllocationStatus(in, out, s) } -func autoConvert_v1alpha2_NnfNodeStorageList_To_v1alpha3_NnfNodeStorageList(in *NnfNodeStorageList, out *v1alpha3.NnfNodeStorageList, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfNodeStorageList_To_v1alpha4_NnfNodeStorageList(in *NnfNodeStorageList, out *v1alpha4.NnfNodeStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha3.NnfNodeStorage)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]v1alpha4.NnfNodeStorage)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha2_NnfNodeStorageList_To_v1alpha3_NnfNodeStorageList is an autogenerated conversion function. -func Convert_v1alpha2_NnfNodeStorageList_To_v1alpha3_NnfNodeStorageList(in *NnfNodeStorageList, out *v1alpha3.NnfNodeStorageList, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfNodeStorageList_To_v1alpha3_NnfNodeStorageList(in, out, s) +// Convert_v1alpha2_NnfNodeStorageList_To_v1alpha4_NnfNodeStorageList is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfNodeStorageList_To_v1alpha4_NnfNodeStorageList(in *NnfNodeStorageList, out *v1alpha4.NnfNodeStorageList, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeStorageList_To_v1alpha4_NnfNodeStorageList(in, out, s) } -func autoConvert_v1alpha3_NnfNodeStorageList_To_v1alpha2_NnfNodeStorageList(in *v1alpha3.NnfNodeStorageList, out *NnfNodeStorageList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeStorageList_To_v1alpha2_NnfNodeStorageList(in *v1alpha4.NnfNodeStorageList, out *NnfNodeStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]NnfNodeStorage)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha3_NnfNodeStorageList_To_v1alpha2_NnfNodeStorageList is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeStorageList_To_v1alpha2_NnfNodeStorageList(in *v1alpha3.NnfNodeStorageList, out *NnfNodeStorageList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeStorageList_To_v1alpha2_NnfNodeStorageList(in, out, s) +// Convert_v1alpha4_NnfNodeStorageList_To_v1alpha2_NnfNodeStorageList is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeStorageList_To_v1alpha2_NnfNodeStorageList(in *v1alpha4.NnfNodeStorageList, out *NnfNodeStorageList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeStorageList_To_v1alpha2_NnfNodeStorageList(in, out, s) } -func autoConvert_v1alpha2_NnfNodeStorageSpec_To_v1alpha3_NnfNodeStorageSpec(in *NnfNodeStorageSpec, out *v1alpha3.NnfNodeStorageSpec, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfNodeStorageSpec_To_v1alpha4_NnfNodeStorageSpec(in *NnfNodeStorageSpec, out *v1alpha4.NnfNodeStorageSpec, s conversion.Scope) error { out.Count = in.Count out.SharedAllocation = in.SharedAllocation out.Capacity = in.Capacity out.UserID = in.UserID out.GroupID = in.GroupID out.FileSystemType = in.FileSystemType - if err := Convert_v1alpha2_LustreStorageSpec_To_v1alpha3_LustreStorageSpec(&in.LustreStorage, &out.LustreStorage, s); err != nil { + if err := Convert_v1alpha2_LustreStorageSpec_To_v1alpha4_LustreStorageSpec(&in.LustreStorage, &out.LustreStorage, s); err != nil { return err } out.BlockReference = in.BlockReference return nil } -// Convert_v1alpha2_NnfNodeStorageSpec_To_v1alpha3_NnfNodeStorageSpec is an autogenerated conversion function. -func Convert_v1alpha2_NnfNodeStorageSpec_To_v1alpha3_NnfNodeStorageSpec(in *NnfNodeStorageSpec, out *v1alpha3.NnfNodeStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfNodeStorageSpec_To_v1alpha3_NnfNodeStorageSpec(in, out, s) +// Convert_v1alpha2_NnfNodeStorageSpec_To_v1alpha4_NnfNodeStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfNodeStorageSpec_To_v1alpha4_NnfNodeStorageSpec(in *NnfNodeStorageSpec, out *v1alpha4.NnfNodeStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeStorageSpec_To_v1alpha4_NnfNodeStorageSpec(in, out, s) } -func autoConvert_v1alpha3_NnfNodeStorageSpec_To_v1alpha2_NnfNodeStorageSpec(in *v1alpha3.NnfNodeStorageSpec, out *NnfNodeStorageSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeStorageSpec_To_v1alpha2_NnfNodeStorageSpec(in *v1alpha4.NnfNodeStorageSpec, out *NnfNodeStorageSpec, s conversion.Scope) error { out.Count = in.Count out.SharedAllocation = in.SharedAllocation out.Capacity = in.Capacity out.UserID = in.UserID out.GroupID = in.GroupID out.FileSystemType = in.FileSystemType - if err := Convert_v1alpha3_LustreStorageSpec_To_v1alpha2_LustreStorageSpec(&in.LustreStorage, &out.LustreStorage, s); err != nil { + if err := Convert_v1alpha4_LustreStorageSpec_To_v1alpha2_LustreStorageSpec(&in.LustreStorage, &out.LustreStorage, s); err != nil { return err } out.BlockReference = in.BlockReference return nil } -// Convert_v1alpha3_NnfNodeStorageSpec_To_v1alpha2_NnfNodeStorageSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeStorageSpec_To_v1alpha2_NnfNodeStorageSpec(in *v1alpha3.NnfNodeStorageSpec, out *NnfNodeStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeStorageSpec_To_v1alpha2_NnfNodeStorageSpec(in, out, s) +// Convert_v1alpha4_NnfNodeStorageSpec_To_v1alpha2_NnfNodeStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeStorageSpec_To_v1alpha2_NnfNodeStorageSpec(in *v1alpha4.NnfNodeStorageSpec, out *NnfNodeStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeStorageSpec_To_v1alpha2_NnfNodeStorageSpec(in, out, s) } -func autoConvert_v1alpha2_NnfNodeStorageStatus_To_v1alpha3_NnfNodeStorageStatus(in *NnfNodeStorageStatus, out *v1alpha3.NnfNodeStorageStatus, s conversion.Scope) error { - out.Allocations = *(*[]v1alpha3.NnfNodeStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) +func autoConvert_v1alpha2_NnfNodeStorageStatus_To_v1alpha4_NnfNodeStorageStatus(in *NnfNodeStorageStatus, out *v1alpha4.NnfNodeStorageStatus, s conversion.Scope) error { + out.Allocations = *(*[]v1alpha4.NnfNodeStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) out.Ready = in.Ready out.ResourceError = in.ResourceError return nil } -// Convert_v1alpha2_NnfNodeStorageStatus_To_v1alpha3_NnfNodeStorageStatus is an autogenerated conversion function. -func Convert_v1alpha2_NnfNodeStorageStatus_To_v1alpha3_NnfNodeStorageStatus(in *NnfNodeStorageStatus, out *v1alpha3.NnfNodeStorageStatus, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfNodeStorageStatus_To_v1alpha3_NnfNodeStorageStatus(in, out, s) +// Convert_v1alpha2_NnfNodeStorageStatus_To_v1alpha4_NnfNodeStorageStatus is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfNodeStorageStatus_To_v1alpha4_NnfNodeStorageStatus(in *NnfNodeStorageStatus, out *v1alpha4.NnfNodeStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfNodeStorageStatus_To_v1alpha4_NnfNodeStorageStatus(in, out, s) } -func autoConvert_v1alpha3_NnfNodeStorageStatus_To_v1alpha2_NnfNodeStorageStatus(in *v1alpha3.NnfNodeStorageStatus, out *NnfNodeStorageStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfNodeStorageStatus_To_v1alpha2_NnfNodeStorageStatus(in *v1alpha4.NnfNodeStorageStatus, out *NnfNodeStorageStatus, s conversion.Scope) error { out.Allocations = *(*[]NnfNodeStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) out.Ready = in.Ready out.ResourceError = in.ResourceError return nil } -// Convert_v1alpha3_NnfNodeStorageStatus_To_v1alpha2_NnfNodeStorageStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfNodeStorageStatus_To_v1alpha2_NnfNodeStorageStatus(in *v1alpha3.NnfNodeStorageStatus, out *NnfNodeStorageStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfNodeStorageStatus_To_v1alpha2_NnfNodeStorageStatus(in, out, s) +// Convert_v1alpha4_NnfNodeStorageStatus_To_v1alpha2_NnfNodeStorageStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeStorageStatus_To_v1alpha2_NnfNodeStorageStatus(in *v1alpha4.NnfNodeStorageStatus, out *NnfNodeStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeStorageStatus_To_v1alpha2_NnfNodeStorageStatus(in, out, s) } -func autoConvert_v1alpha2_NnfPortManager_To_v1alpha3_NnfPortManager(in *NnfPortManager, out *v1alpha3.NnfPortManager, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfPortManager_To_v1alpha4_NnfPortManager(in *NnfPortManager, out *v1alpha4.NnfPortManager, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha2_NnfPortManagerSpec_To_v1alpha3_NnfPortManagerSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha2_NnfPortManagerSpec_To_v1alpha4_NnfPortManagerSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha2_NnfPortManagerStatus_To_v1alpha3_NnfPortManagerStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha2_NnfPortManagerStatus_To_v1alpha4_NnfPortManagerStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha2_NnfPortManager_To_v1alpha3_NnfPortManager is an autogenerated conversion function. -func Convert_v1alpha2_NnfPortManager_To_v1alpha3_NnfPortManager(in *NnfPortManager, out *v1alpha3.NnfPortManager, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfPortManager_To_v1alpha3_NnfPortManager(in, out, s) +// Convert_v1alpha2_NnfPortManager_To_v1alpha4_NnfPortManager is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfPortManager_To_v1alpha4_NnfPortManager(in *NnfPortManager, out *v1alpha4.NnfPortManager, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfPortManager_To_v1alpha4_NnfPortManager(in, out, s) } -func autoConvert_v1alpha3_NnfPortManager_To_v1alpha2_NnfPortManager(in *v1alpha3.NnfPortManager, out *NnfPortManager, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfPortManager_To_v1alpha2_NnfPortManager(in *v1alpha4.NnfPortManager, out *NnfPortManager, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfPortManagerSpec_To_v1alpha2_NnfPortManagerSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha4_NnfPortManagerSpec_To_v1alpha2_NnfPortManagerSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha3_NnfPortManagerStatus_To_v1alpha2_NnfPortManagerStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha4_NnfPortManagerStatus_To_v1alpha2_NnfPortManagerStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfPortManager_To_v1alpha2_NnfPortManager is an autogenerated conversion function. -func Convert_v1alpha3_NnfPortManager_To_v1alpha2_NnfPortManager(in *v1alpha3.NnfPortManager, out *NnfPortManager, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfPortManager_To_v1alpha2_NnfPortManager(in, out, s) +// Convert_v1alpha4_NnfPortManager_To_v1alpha2_NnfPortManager is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfPortManager_To_v1alpha2_NnfPortManager(in *v1alpha4.NnfPortManager, out *NnfPortManager, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfPortManager_To_v1alpha2_NnfPortManager(in, out, s) } -func autoConvert_v1alpha2_NnfPortManagerAllocationSpec_To_v1alpha3_NnfPortManagerAllocationSpec(in *NnfPortManagerAllocationSpec, out *v1alpha3.NnfPortManagerAllocationSpec, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfPortManagerAllocationSpec_To_v1alpha4_NnfPortManagerAllocationSpec(in *NnfPortManagerAllocationSpec, out *v1alpha4.NnfPortManagerAllocationSpec, s conversion.Scope) error { out.Requester = in.Requester out.Count = in.Count return nil } -// Convert_v1alpha2_NnfPortManagerAllocationSpec_To_v1alpha3_NnfPortManagerAllocationSpec is an autogenerated conversion function. -func Convert_v1alpha2_NnfPortManagerAllocationSpec_To_v1alpha3_NnfPortManagerAllocationSpec(in *NnfPortManagerAllocationSpec, out *v1alpha3.NnfPortManagerAllocationSpec, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfPortManagerAllocationSpec_To_v1alpha3_NnfPortManagerAllocationSpec(in, out, s) +// Convert_v1alpha2_NnfPortManagerAllocationSpec_To_v1alpha4_NnfPortManagerAllocationSpec is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfPortManagerAllocationSpec_To_v1alpha4_NnfPortManagerAllocationSpec(in *NnfPortManagerAllocationSpec, out *v1alpha4.NnfPortManagerAllocationSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfPortManagerAllocationSpec_To_v1alpha4_NnfPortManagerAllocationSpec(in, out, s) } -func autoConvert_v1alpha3_NnfPortManagerAllocationSpec_To_v1alpha2_NnfPortManagerAllocationSpec(in *v1alpha3.NnfPortManagerAllocationSpec, out *NnfPortManagerAllocationSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfPortManagerAllocationSpec_To_v1alpha2_NnfPortManagerAllocationSpec(in *v1alpha4.NnfPortManagerAllocationSpec, out *NnfPortManagerAllocationSpec, s conversion.Scope) error { out.Requester = in.Requester out.Count = in.Count return nil } -// Convert_v1alpha3_NnfPortManagerAllocationSpec_To_v1alpha2_NnfPortManagerAllocationSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfPortManagerAllocationSpec_To_v1alpha2_NnfPortManagerAllocationSpec(in *v1alpha3.NnfPortManagerAllocationSpec, out *NnfPortManagerAllocationSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfPortManagerAllocationSpec_To_v1alpha2_NnfPortManagerAllocationSpec(in, out, s) +// Convert_v1alpha4_NnfPortManagerAllocationSpec_To_v1alpha2_NnfPortManagerAllocationSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfPortManagerAllocationSpec_To_v1alpha2_NnfPortManagerAllocationSpec(in *v1alpha4.NnfPortManagerAllocationSpec, out *NnfPortManagerAllocationSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfPortManagerAllocationSpec_To_v1alpha2_NnfPortManagerAllocationSpec(in, out, s) } -func autoConvert_v1alpha2_NnfPortManagerAllocationStatus_To_v1alpha3_NnfPortManagerAllocationStatus(in *NnfPortManagerAllocationStatus, out *v1alpha3.NnfPortManagerAllocationStatus, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfPortManagerAllocationStatus_To_v1alpha4_NnfPortManagerAllocationStatus(in *NnfPortManagerAllocationStatus, out *v1alpha4.NnfPortManagerAllocationStatus, s conversion.Scope) error { out.Requester = (*v1.ObjectReference)(unsafe.Pointer(in.Requester)) out.Ports = *(*[]uint16)(unsafe.Pointer(&in.Ports)) - out.Status = v1alpha3.NnfPortManagerAllocationStatusStatus(in.Status) + out.Status = v1alpha4.NnfPortManagerAllocationStatusStatus(in.Status) out.TimeUnallocated = (*metav1.Time)(unsafe.Pointer(in.TimeUnallocated)) return nil } -// Convert_v1alpha2_NnfPortManagerAllocationStatus_To_v1alpha3_NnfPortManagerAllocationStatus is an autogenerated conversion function. -func Convert_v1alpha2_NnfPortManagerAllocationStatus_To_v1alpha3_NnfPortManagerAllocationStatus(in *NnfPortManagerAllocationStatus, out *v1alpha3.NnfPortManagerAllocationStatus, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfPortManagerAllocationStatus_To_v1alpha3_NnfPortManagerAllocationStatus(in, out, s) +// Convert_v1alpha2_NnfPortManagerAllocationStatus_To_v1alpha4_NnfPortManagerAllocationStatus is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfPortManagerAllocationStatus_To_v1alpha4_NnfPortManagerAllocationStatus(in *NnfPortManagerAllocationStatus, out *v1alpha4.NnfPortManagerAllocationStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfPortManagerAllocationStatus_To_v1alpha4_NnfPortManagerAllocationStatus(in, out, s) } -func autoConvert_v1alpha3_NnfPortManagerAllocationStatus_To_v1alpha2_NnfPortManagerAllocationStatus(in *v1alpha3.NnfPortManagerAllocationStatus, out *NnfPortManagerAllocationStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfPortManagerAllocationStatus_To_v1alpha2_NnfPortManagerAllocationStatus(in *v1alpha4.NnfPortManagerAllocationStatus, out *NnfPortManagerAllocationStatus, s conversion.Scope) error { out.Requester = (*v1.ObjectReference)(unsafe.Pointer(in.Requester)) out.Ports = *(*[]uint16)(unsafe.Pointer(&in.Ports)) out.Status = NnfPortManagerAllocationStatusStatus(in.Status) @@ -2313,91 +2313,91 @@ func autoConvert_v1alpha3_NnfPortManagerAllocationStatus_To_v1alpha2_NnfPortMana return nil } -// Convert_v1alpha3_NnfPortManagerAllocationStatus_To_v1alpha2_NnfPortManagerAllocationStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfPortManagerAllocationStatus_To_v1alpha2_NnfPortManagerAllocationStatus(in *v1alpha3.NnfPortManagerAllocationStatus, out *NnfPortManagerAllocationStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfPortManagerAllocationStatus_To_v1alpha2_NnfPortManagerAllocationStatus(in, out, s) +// Convert_v1alpha4_NnfPortManagerAllocationStatus_To_v1alpha2_NnfPortManagerAllocationStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfPortManagerAllocationStatus_To_v1alpha2_NnfPortManagerAllocationStatus(in *v1alpha4.NnfPortManagerAllocationStatus, out *NnfPortManagerAllocationStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfPortManagerAllocationStatus_To_v1alpha2_NnfPortManagerAllocationStatus(in, out, s) } -func autoConvert_v1alpha2_NnfPortManagerList_To_v1alpha3_NnfPortManagerList(in *NnfPortManagerList, out *v1alpha3.NnfPortManagerList, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfPortManagerList_To_v1alpha4_NnfPortManagerList(in *NnfPortManagerList, out *v1alpha4.NnfPortManagerList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha3.NnfPortManager)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]v1alpha4.NnfPortManager)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha2_NnfPortManagerList_To_v1alpha3_NnfPortManagerList is an autogenerated conversion function. -func Convert_v1alpha2_NnfPortManagerList_To_v1alpha3_NnfPortManagerList(in *NnfPortManagerList, out *v1alpha3.NnfPortManagerList, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfPortManagerList_To_v1alpha3_NnfPortManagerList(in, out, s) +// Convert_v1alpha2_NnfPortManagerList_To_v1alpha4_NnfPortManagerList is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfPortManagerList_To_v1alpha4_NnfPortManagerList(in *NnfPortManagerList, out *v1alpha4.NnfPortManagerList, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfPortManagerList_To_v1alpha4_NnfPortManagerList(in, out, s) } -func autoConvert_v1alpha3_NnfPortManagerList_To_v1alpha2_NnfPortManagerList(in *v1alpha3.NnfPortManagerList, out *NnfPortManagerList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfPortManagerList_To_v1alpha2_NnfPortManagerList(in *v1alpha4.NnfPortManagerList, out *NnfPortManagerList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]NnfPortManager)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha3_NnfPortManagerList_To_v1alpha2_NnfPortManagerList is an autogenerated conversion function. -func Convert_v1alpha3_NnfPortManagerList_To_v1alpha2_NnfPortManagerList(in *v1alpha3.NnfPortManagerList, out *NnfPortManagerList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfPortManagerList_To_v1alpha2_NnfPortManagerList(in, out, s) +// Convert_v1alpha4_NnfPortManagerList_To_v1alpha2_NnfPortManagerList is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfPortManagerList_To_v1alpha2_NnfPortManagerList(in *v1alpha4.NnfPortManagerList, out *NnfPortManagerList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfPortManagerList_To_v1alpha2_NnfPortManagerList(in, out, s) } -func autoConvert_v1alpha2_NnfPortManagerSpec_To_v1alpha3_NnfPortManagerSpec(in *NnfPortManagerSpec, out *v1alpha3.NnfPortManagerSpec, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfPortManagerSpec_To_v1alpha4_NnfPortManagerSpec(in *NnfPortManagerSpec, out *v1alpha4.NnfPortManagerSpec, s conversion.Scope) error { out.SystemConfiguration = in.SystemConfiguration - out.Allocations = *(*[]v1alpha3.NnfPortManagerAllocationSpec)(unsafe.Pointer(&in.Allocations)) + out.Allocations = *(*[]v1alpha4.NnfPortManagerAllocationSpec)(unsafe.Pointer(&in.Allocations)) return nil } -// Convert_v1alpha2_NnfPortManagerSpec_To_v1alpha3_NnfPortManagerSpec is an autogenerated conversion function. -func Convert_v1alpha2_NnfPortManagerSpec_To_v1alpha3_NnfPortManagerSpec(in *NnfPortManagerSpec, out *v1alpha3.NnfPortManagerSpec, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfPortManagerSpec_To_v1alpha3_NnfPortManagerSpec(in, out, s) +// Convert_v1alpha2_NnfPortManagerSpec_To_v1alpha4_NnfPortManagerSpec is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfPortManagerSpec_To_v1alpha4_NnfPortManagerSpec(in *NnfPortManagerSpec, out *v1alpha4.NnfPortManagerSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfPortManagerSpec_To_v1alpha4_NnfPortManagerSpec(in, out, s) } -func autoConvert_v1alpha3_NnfPortManagerSpec_To_v1alpha2_NnfPortManagerSpec(in *v1alpha3.NnfPortManagerSpec, out *NnfPortManagerSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfPortManagerSpec_To_v1alpha2_NnfPortManagerSpec(in *v1alpha4.NnfPortManagerSpec, out *NnfPortManagerSpec, s conversion.Scope) error { out.SystemConfiguration = in.SystemConfiguration out.Allocations = *(*[]NnfPortManagerAllocationSpec)(unsafe.Pointer(&in.Allocations)) return nil } -// Convert_v1alpha3_NnfPortManagerSpec_To_v1alpha2_NnfPortManagerSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfPortManagerSpec_To_v1alpha2_NnfPortManagerSpec(in *v1alpha3.NnfPortManagerSpec, out *NnfPortManagerSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfPortManagerSpec_To_v1alpha2_NnfPortManagerSpec(in, out, s) +// Convert_v1alpha4_NnfPortManagerSpec_To_v1alpha2_NnfPortManagerSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfPortManagerSpec_To_v1alpha2_NnfPortManagerSpec(in *v1alpha4.NnfPortManagerSpec, out *NnfPortManagerSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfPortManagerSpec_To_v1alpha2_NnfPortManagerSpec(in, out, s) } -func autoConvert_v1alpha2_NnfPortManagerStatus_To_v1alpha3_NnfPortManagerStatus(in *NnfPortManagerStatus, out *v1alpha3.NnfPortManagerStatus, s conversion.Scope) error { - out.Allocations = *(*[]v1alpha3.NnfPortManagerAllocationStatus)(unsafe.Pointer(&in.Allocations)) - out.Status = v1alpha3.NnfPortManagerStatusStatus(in.Status) +func autoConvert_v1alpha2_NnfPortManagerStatus_To_v1alpha4_NnfPortManagerStatus(in *NnfPortManagerStatus, out *v1alpha4.NnfPortManagerStatus, s conversion.Scope) error { + out.Allocations = *(*[]v1alpha4.NnfPortManagerAllocationStatus)(unsafe.Pointer(&in.Allocations)) + out.Status = v1alpha4.NnfPortManagerStatusStatus(in.Status) return nil } -// Convert_v1alpha2_NnfPortManagerStatus_To_v1alpha3_NnfPortManagerStatus is an autogenerated conversion function. -func Convert_v1alpha2_NnfPortManagerStatus_To_v1alpha3_NnfPortManagerStatus(in *NnfPortManagerStatus, out *v1alpha3.NnfPortManagerStatus, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfPortManagerStatus_To_v1alpha3_NnfPortManagerStatus(in, out, s) +// Convert_v1alpha2_NnfPortManagerStatus_To_v1alpha4_NnfPortManagerStatus is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfPortManagerStatus_To_v1alpha4_NnfPortManagerStatus(in *NnfPortManagerStatus, out *v1alpha4.NnfPortManagerStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfPortManagerStatus_To_v1alpha4_NnfPortManagerStatus(in, out, s) } -func autoConvert_v1alpha3_NnfPortManagerStatus_To_v1alpha2_NnfPortManagerStatus(in *v1alpha3.NnfPortManagerStatus, out *NnfPortManagerStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfPortManagerStatus_To_v1alpha2_NnfPortManagerStatus(in *v1alpha4.NnfPortManagerStatus, out *NnfPortManagerStatus, s conversion.Scope) error { out.Allocations = *(*[]NnfPortManagerAllocationStatus)(unsafe.Pointer(&in.Allocations)) out.Status = NnfPortManagerStatusStatus(in.Status) return nil } -// Convert_v1alpha3_NnfPortManagerStatus_To_v1alpha2_NnfPortManagerStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfPortManagerStatus_To_v1alpha2_NnfPortManagerStatus(in *v1alpha3.NnfPortManagerStatus, out *NnfPortManagerStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfPortManagerStatus_To_v1alpha2_NnfPortManagerStatus(in, out, s) +// Convert_v1alpha4_NnfPortManagerStatus_To_v1alpha2_NnfPortManagerStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfPortManagerStatus_To_v1alpha2_NnfPortManagerStatus(in *v1alpha4.NnfPortManagerStatus, out *NnfPortManagerStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfPortManagerStatus_To_v1alpha2_NnfPortManagerStatus(in, out, s) } -func autoConvert_v1alpha2_NnfResourceStatus_To_v1alpha3_NnfResourceStatus(in *NnfResourceStatus, out *v1alpha3.NnfResourceStatus, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfResourceStatus_To_v1alpha4_NnfResourceStatus(in *NnfResourceStatus, out *v1alpha4.NnfResourceStatus, s conversion.Scope) error { out.ID = in.ID out.Name = in.Name - out.Status = v1alpha3.NnfResourceStatusType(in.Status) - out.Health = v1alpha3.NnfResourceHealthType(in.Health) + out.Status = v1alpha4.NnfResourceStatusType(in.Status) + out.Health = v1alpha4.NnfResourceHealthType(in.Health) return nil } -// Convert_v1alpha2_NnfResourceStatus_To_v1alpha3_NnfResourceStatus is an autogenerated conversion function. -func Convert_v1alpha2_NnfResourceStatus_To_v1alpha3_NnfResourceStatus(in *NnfResourceStatus, out *v1alpha3.NnfResourceStatus, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfResourceStatus_To_v1alpha3_NnfResourceStatus(in, out, s) +// Convert_v1alpha2_NnfResourceStatus_To_v1alpha4_NnfResourceStatus is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfResourceStatus_To_v1alpha4_NnfResourceStatus(in *NnfResourceStatus, out *v1alpha4.NnfResourceStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfResourceStatus_To_v1alpha4_NnfResourceStatus(in, out, s) } -func autoConvert_v1alpha3_NnfResourceStatus_To_v1alpha2_NnfResourceStatus(in *v1alpha3.NnfResourceStatus, out *NnfResourceStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfResourceStatus_To_v1alpha2_NnfResourceStatus(in *v1alpha4.NnfResourceStatus, out *NnfResourceStatus, s conversion.Scope) error { out.ID = in.ID out.Name = in.Name out.Status = NnfResourceStatusType(in.Status) @@ -2405,111 +2405,111 @@ func autoConvert_v1alpha3_NnfResourceStatus_To_v1alpha2_NnfResourceStatus(in *v1 return nil } -// Convert_v1alpha3_NnfResourceStatus_To_v1alpha2_NnfResourceStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfResourceStatus_To_v1alpha2_NnfResourceStatus(in *v1alpha3.NnfResourceStatus, out *NnfResourceStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfResourceStatus_To_v1alpha2_NnfResourceStatus(in, out, s) +// Convert_v1alpha4_NnfResourceStatus_To_v1alpha2_NnfResourceStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfResourceStatus_To_v1alpha2_NnfResourceStatus(in *v1alpha4.NnfResourceStatus, out *NnfResourceStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfResourceStatus_To_v1alpha2_NnfResourceStatus(in, out, s) } -func autoConvert_v1alpha2_NnfServerStatus_To_v1alpha3_NnfServerStatus(in *NnfServerStatus, out *v1alpha3.NnfServerStatus, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfServerStatus_To_v1alpha4_NnfServerStatus(in *NnfServerStatus, out *v1alpha4.NnfServerStatus, s conversion.Scope) error { out.Hostname = in.Hostname - if err := Convert_v1alpha2_NnfResourceStatus_To_v1alpha3_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { + if err := Convert_v1alpha2_NnfResourceStatus_To_v1alpha4_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { return err } return nil } -// Convert_v1alpha2_NnfServerStatus_To_v1alpha3_NnfServerStatus is an autogenerated conversion function. -func Convert_v1alpha2_NnfServerStatus_To_v1alpha3_NnfServerStatus(in *NnfServerStatus, out *v1alpha3.NnfServerStatus, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfServerStatus_To_v1alpha3_NnfServerStatus(in, out, s) +// Convert_v1alpha2_NnfServerStatus_To_v1alpha4_NnfServerStatus is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfServerStatus_To_v1alpha4_NnfServerStatus(in *NnfServerStatus, out *v1alpha4.NnfServerStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfServerStatus_To_v1alpha4_NnfServerStatus(in, out, s) } -func autoConvert_v1alpha3_NnfServerStatus_To_v1alpha2_NnfServerStatus(in *v1alpha3.NnfServerStatus, out *NnfServerStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfServerStatus_To_v1alpha2_NnfServerStatus(in *v1alpha4.NnfServerStatus, out *NnfServerStatus, s conversion.Scope) error { out.Hostname = in.Hostname - if err := Convert_v1alpha3_NnfResourceStatus_To_v1alpha2_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { + if err := Convert_v1alpha4_NnfResourceStatus_To_v1alpha2_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfServerStatus_To_v1alpha2_NnfServerStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfServerStatus_To_v1alpha2_NnfServerStatus(in *v1alpha3.NnfServerStatus, out *NnfServerStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfServerStatus_To_v1alpha2_NnfServerStatus(in, out, s) +// Convert_v1alpha4_NnfServerStatus_To_v1alpha2_NnfServerStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfServerStatus_To_v1alpha2_NnfServerStatus(in *v1alpha4.NnfServerStatus, out *NnfServerStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfServerStatus_To_v1alpha2_NnfServerStatus(in, out, s) } -func autoConvert_v1alpha2_NnfStorage_To_v1alpha3_NnfStorage(in *NnfStorage, out *v1alpha3.NnfStorage, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfStorage_To_v1alpha4_NnfStorage(in *NnfStorage, out *v1alpha4.NnfStorage, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha2_NnfStorageSpec_To_v1alpha3_NnfStorageSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha2_NnfStorageSpec_To_v1alpha4_NnfStorageSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha2_NnfStorageStatus_To_v1alpha3_NnfStorageStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha2_NnfStorageStatus_To_v1alpha4_NnfStorageStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha2_NnfStorage_To_v1alpha3_NnfStorage is an autogenerated conversion function. -func Convert_v1alpha2_NnfStorage_To_v1alpha3_NnfStorage(in *NnfStorage, out *v1alpha3.NnfStorage, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfStorage_To_v1alpha3_NnfStorage(in, out, s) +// Convert_v1alpha2_NnfStorage_To_v1alpha4_NnfStorage is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfStorage_To_v1alpha4_NnfStorage(in *NnfStorage, out *v1alpha4.NnfStorage, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorage_To_v1alpha4_NnfStorage(in, out, s) } -func autoConvert_v1alpha3_NnfStorage_To_v1alpha2_NnfStorage(in *v1alpha3.NnfStorage, out *NnfStorage, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorage_To_v1alpha2_NnfStorage(in *v1alpha4.NnfStorage, out *NnfStorage, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfStorageSpec_To_v1alpha2_NnfStorageSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha4_NnfStorageSpec_To_v1alpha2_NnfStorageSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha3_NnfStorageStatus_To_v1alpha2_NnfStorageStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha4_NnfStorageStatus_To_v1alpha2_NnfStorageStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfStorage_To_v1alpha2_NnfStorage is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorage_To_v1alpha2_NnfStorage(in *v1alpha3.NnfStorage, out *NnfStorage, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorage_To_v1alpha2_NnfStorage(in, out, s) +// Convert_v1alpha4_NnfStorage_To_v1alpha2_NnfStorage is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorage_To_v1alpha2_NnfStorage(in *v1alpha4.NnfStorage, out *NnfStorage, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorage_To_v1alpha2_NnfStorage(in, out, s) } -func autoConvert_v1alpha2_NnfStorageAllocationNodes_To_v1alpha3_NnfStorageAllocationNodes(in *NnfStorageAllocationNodes, out *v1alpha3.NnfStorageAllocationNodes, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfStorageAllocationNodes_To_v1alpha4_NnfStorageAllocationNodes(in *NnfStorageAllocationNodes, out *v1alpha4.NnfStorageAllocationNodes, s conversion.Scope) error { out.Name = in.Name out.Count = in.Count return nil } -// Convert_v1alpha2_NnfStorageAllocationNodes_To_v1alpha3_NnfStorageAllocationNodes is an autogenerated conversion function. -func Convert_v1alpha2_NnfStorageAllocationNodes_To_v1alpha3_NnfStorageAllocationNodes(in *NnfStorageAllocationNodes, out *v1alpha3.NnfStorageAllocationNodes, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfStorageAllocationNodes_To_v1alpha3_NnfStorageAllocationNodes(in, out, s) +// Convert_v1alpha2_NnfStorageAllocationNodes_To_v1alpha4_NnfStorageAllocationNodes is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfStorageAllocationNodes_To_v1alpha4_NnfStorageAllocationNodes(in *NnfStorageAllocationNodes, out *v1alpha4.NnfStorageAllocationNodes, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageAllocationNodes_To_v1alpha4_NnfStorageAllocationNodes(in, out, s) } -func autoConvert_v1alpha3_NnfStorageAllocationNodes_To_v1alpha2_NnfStorageAllocationNodes(in *v1alpha3.NnfStorageAllocationNodes, out *NnfStorageAllocationNodes, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageAllocationNodes_To_v1alpha2_NnfStorageAllocationNodes(in *v1alpha4.NnfStorageAllocationNodes, out *NnfStorageAllocationNodes, s conversion.Scope) error { out.Name = in.Name out.Count = in.Count return nil } -// Convert_v1alpha3_NnfStorageAllocationNodes_To_v1alpha2_NnfStorageAllocationNodes is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorageAllocationNodes_To_v1alpha2_NnfStorageAllocationNodes(in *v1alpha3.NnfStorageAllocationNodes, out *NnfStorageAllocationNodes, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageAllocationNodes_To_v1alpha2_NnfStorageAllocationNodes(in, out, s) +// Convert_v1alpha4_NnfStorageAllocationNodes_To_v1alpha2_NnfStorageAllocationNodes is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageAllocationNodes_To_v1alpha2_NnfStorageAllocationNodes(in *v1alpha4.NnfStorageAllocationNodes, out *NnfStorageAllocationNodes, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageAllocationNodes_To_v1alpha2_NnfStorageAllocationNodes(in, out, s) } -func autoConvert_v1alpha2_NnfStorageAllocationSetSpec_To_v1alpha3_NnfStorageAllocationSetSpec(in *NnfStorageAllocationSetSpec, out *v1alpha3.NnfStorageAllocationSetSpec, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfStorageAllocationSetSpec_To_v1alpha4_NnfStorageAllocationSetSpec(in *NnfStorageAllocationSetSpec, out *v1alpha4.NnfStorageAllocationSetSpec, s conversion.Scope) error { out.Name = in.Name out.Capacity = in.Capacity - if err := Convert_v1alpha2_NnfStorageLustreSpec_To_v1alpha3_NnfStorageLustreSpec(&in.NnfStorageLustreSpec, &out.NnfStorageLustreSpec, s); err != nil { + if err := Convert_v1alpha2_NnfStorageLustreSpec_To_v1alpha4_NnfStorageLustreSpec(&in.NnfStorageLustreSpec, &out.NnfStorageLustreSpec, s); err != nil { return err } out.SharedAllocation = in.SharedAllocation - out.Nodes = *(*[]v1alpha3.NnfStorageAllocationNodes)(unsafe.Pointer(&in.Nodes)) + out.Nodes = *(*[]v1alpha4.NnfStorageAllocationNodes)(unsafe.Pointer(&in.Nodes)) return nil } -// Convert_v1alpha2_NnfStorageAllocationSetSpec_To_v1alpha3_NnfStorageAllocationSetSpec is an autogenerated conversion function. -func Convert_v1alpha2_NnfStorageAllocationSetSpec_To_v1alpha3_NnfStorageAllocationSetSpec(in *NnfStorageAllocationSetSpec, out *v1alpha3.NnfStorageAllocationSetSpec, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfStorageAllocationSetSpec_To_v1alpha3_NnfStorageAllocationSetSpec(in, out, s) +// Convert_v1alpha2_NnfStorageAllocationSetSpec_To_v1alpha4_NnfStorageAllocationSetSpec is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfStorageAllocationSetSpec_To_v1alpha4_NnfStorageAllocationSetSpec(in *NnfStorageAllocationSetSpec, out *v1alpha4.NnfStorageAllocationSetSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageAllocationSetSpec_To_v1alpha4_NnfStorageAllocationSetSpec(in, out, s) } -func autoConvert_v1alpha3_NnfStorageAllocationSetSpec_To_v1alpha2_NnfStorageAllocationSetSpec(in *v1alpha3.NnfStorageAllocationSetSpec, out *NnfStorageAllocationSetSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageAllocationSetSpec_To_v1alpha2_NnfStorageAllocationSetSpec(in *v1alpha4.NnfStorageAllocationSetSpec, out *NnfStorageAllocationSetSpec, s conversion.Scope) error { out.Name = in.Name out.Capacity = in.Capacity - if err := Convert_v1alpha3_NnfStorageLustreSpec_To_v1alpha2_NnfStorageLustreSpec(&in.NnfStorageLustreSpec, &out.NnfStorageLustreSpec, s); err != nil { + if err := Convert_v1alpha4_NnfStorageLustreSpec_To_v1alpha2_NnfStorageLustreSpec(&in.NnfStorageLustreSpec, &out.NnfStorageLustreSpec, s); err != nil { return err } out.SharedAllocation = in.SharedAllocation @@ -2517,56 +2517,56 @@ func autoConvert_v1alpha3_NnfStorageAllocationSetSpec_To_v1alpha2_NnfStorageAllo return nil } -// Convert_v1alpha3_NnfStorageAllocationSetSpec_To_v1alpha2_NnfStorageAllocationSetSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorageAllocationSetSpec_To_v1alpha2_NnfStorageAllocationSetSpec(in *v1alpha3.NnfStorageAllocationSetSpec, out *NnfStorageAllocationSetSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageAllocationSetSpec_To_v1alpha2_NnfStorageAllocationSetSpec(in, out, s) +// Convert_v1alpha4_NnfStorageAllocationSetSpec_To_v1alpha2_NnfStorageAllocationSetSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageAllocationSetSpec_To_v1alpha2_NnfStorageAllocationSetSpec(in *v1alpha4.NnfStorageAllocationSetSpec, out *NnfStorageAllocationSetSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageAllocationSetSpec_To_v1alpha2_NnfStorageAllocationSetSpec(in, out, s) } -func autoConvert_v1alpha2_NnfStorageAllocationSetStatus_To_v1alpha3_NnfStorageAllocationSetStatus(in *NnfStorageAllocationSetStatus, out *v1alpha3.NnfStorageAllocationSetStatus, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfStorageAllocationSetStatus_To_v1alpha4_NnfStorageAllocationSetStatus(in *NnfStorageAllocationSetStatus, out *v1alpha4.NnfStorageAllocationSetStatus, s conversion.Scope) error { out.Ready = in.Ready out.AllocationCount = in.AllocationCount return nil } -// Convert_v1alpha2_NnfStorageAllocationSetStatus_To_v1alpha3_NnfStorageAllocationSetStatus is an autogenerated conversion function. -func Convert_v1alpha2_NnfStorageAllocationSetStatus_To_v1alpha3_NnfStorageAllocationSetStatus(in *NnfStorageAllocationSetStatus, out *v1alpha3.NnfStorageAllocationSetStatus, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfStorageAllocationSetStatus_To_v1alpha3_NnfStorageAllocationSetStatus(in, out, s) +// Convert_v1alpha2_NnfStorageAllocationSetStatus_To_v1alpha4_NnfStorageAllocationSetStatus is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfStorageAllocationSetStatus_To_v1alpha4_NnfStorageAllocationSetStatus(in *NnfStorageAllocationSetStatus, out *v1alpha4.NnfStorageAllocationSetStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageAllocationSetStatus_To_v1alpha4_NnfStorageAllocationSetStatus(in, out, s) } -func autoConvert_v1alpha3_NnfStorageAllocationSetStatus_To_v1alpha2_NnfStorageAllocationSetStatus(in *v1alpha3.NnfStorageAllocationSetStatus, out *NnfStorageAllocationSetStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageAllocationSetStatus_To_v1alpha2_NnfStorageAllocationSetStatus(in *v1alpha4.NnfStorageAllocationSetStatus, out *NnfStorageAllocationSetStatus, s conversion.Scope) error { out.Ready = in.Ready out.AllocationCount = in.AllocationCount return nil } -// Convert_v1alpha3_NnfStorageAllocationSetStatus_To_v1alpha2_NnfStorageAllocationSetStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorageAllocationSetStatus_To_v1alpha2_NnfStorageAllocationSetStatus(in *v1alpha3.NnfStorageAllocationSetStatus, out *NnfStorageAllocationSetStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageAllocationSetStatus_To_v1alpha2_NnfStorageAllocationSetStatus(in, out, s) +// Convert_v1alpha4_NnfStorageAllocationSetStatus_To_v1alpha2_NnfStorageAllocationSetStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageAllocationSetStatus_To_v1alpha2_NnfStorageAllocationSetStatus(in *v1alpha4.NnfStorageAllocationSetStatus, out *NnfStorageAllocationSetStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageAllocationSetStatus_To_v1alpha2_NnfStorageAllocationSetStatus(in, out, s) } -func autoConvert_v1alpha2_NnfStorageList_To_v1alpha3_NnfStorageList(in *NnfStorageList, out *v1alpha3.NnfStorageList, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfStorageList_To_v1alpha4_NnfStorageList(in *NnfStorageList, out *v1alpha4.NnfStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha3.NnfStorage)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]v1alpha4.NnfStorage)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha2_NnfStorageList_To_v1alpha3_NnfStorageList is an autogenerated conversion function. -func Convert_v1alpha2_NnfStorageList_To_v1alpha3_NnfStorageList(in *NnfStorageList, out *v1alpha3.NnfStorageList, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfStorageList_To_v1alpha3_NnfStorageList(in, out, s) +// Convert_v1alpha2_NnfStorageList_To_v1alpha4_NnfStorageList is an autogenerated conversion function. +func Convert_v1alpha2_NnfStorageList_To_v1alpha4_NnfStorageList(in *NnfStorageList, out *v1alpha4.NnfStorageList, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageList_To_v1alpha4_NnfStorageList(in, out, s) } -func autoConvert_v1alpha3_NnfStorageList_To_v1alpha2_NnfStorageList(in *v1alpha3.NnfStorageList, out *NnfStorageList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageList_To_v1alpha2_NnfStorageList(in *v1alpha4.NnfStorageList, out *NnfStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]NnfStorage)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha3_NnfStorageList_To_v1alpha2_NnfStorageList is an autogenerated conversion function. 
-func Convert_v1alpha3_NnfStorageList_To_v1alpha2_NnfStorageList(in *v1alpha3.NnfStorageList, out *NnfStorageList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageList_To_v1alpha2_NnfStorageList(in, out, s) +// Convert_v1alpha4_NnfStorageList_To_v1alpha2_NnfStorageList is an autogenerated conversion function. +func Convert_v1alpha4_NnfStorageList_To_v1alpha2_NnfStorageList(in *v1alpha4.NnfStorageList, out *NnfStorageList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageList_To_v1alpha2_NnfStorageList(in, out, s) } -func autoConvert_v1alpha2_NnfStorageLustreSpec_To_v1alpha3_NnfStorageLustreSpec(in *NnfStorageLustreSpec, out *v1alpha3.NnfStorageLustreSpec, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfStorageLustreSpec_To_v1alpha4_NnfStorageLustreSpec(in *NnfStorageLustreSpec, out *v1alpha4.NnfStorageLustreSpec, s conversion.Scope) error { out.TargetType = in.TargetType out.BackFs = in.BackFs out.MgsAddress = in.MgsAddress @@ -2574,12 +2574,12 @@ func autoConvert_v1alpha2_NnfStorageLustreSpec_To_v1alpha3_NnfStorageLustreSpec( return nil } -// Convert_v1alpha2_NnfStorageLustreSpec_To_v1alpha3_NnfStorageLustreSpec is an autogenerated conversion function. -func Convert_v1alpha2_NnfStorageLustreSpec_To_v1alpha3_NnfStorageLustreSpec(in *NnfStorageLustreSpec, out *v1alpha3.NnfStorageLustreSpec, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfStorageLustreSpec_To_v1alpha3_NnfStorageLustreSpec(in, out, s) +// Convert_v1alpha2_NnfStorageLustreSpec_To_v1alpha4_NnfStorageLustreSpec is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfStorageLustreSpec_To_v1alpha4_NnfStorageLustreSpec(in *NnfStorageLustreSpec, out *v1alpha4.NnfStorageLustreSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageLustreSpec_To_v1alpha4_NnfStorageLustreSpec(in, out, s) } -func autoConvert_v1alpha3_NnfStorageLustreSpec_To_v1alpha2_NnfStorageLustreSpec(in *v1alpha3.NnfStorageLustreSpec, out *NnfStorageLustreSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageLustreSpec_To_v1alpha2_NnfStorageLustreSpec(in *v1alpha4.NnfStorageLustreSpec, out *NnfStorageLustreSpec, s conversion.Scope) error { out.TargetType = in.TargetType out.BackFs = in.BackFs out.MgsAddress = in.MgsAddress @@ -2587,73 +2587,73 @@ func autoConvert_v1alpha3_NnfStorageLustreSpec_To_v1alpha2_NnfStorageLustreSpec( return nil } -// Convert_v1alpha3_NnfStorageLustreSpec_To_v1alpha2_NnfStorageLustreSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorageLustreSpec_To_v1alpha2_NnfStorageLustreSpec(in *v1alpha3.NnfStorageLustreSpec, out *NnfStorageLustreSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageLustreSpec_To_v1alpha2_NnfStorageLustreSpec(in, out, s) +// Convert_v1alpha4_NnfStorageLustreSpec_To_v1alpha2_NnfStorageLustreSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageLustreSpec_To_v1alpha2_NnfStorageLustreSpec(in *v1alpha4.NnfStorageLustreSpec, out *NnfStorageLustreSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageLustreSpec_To_v1alpha2_NnfStorageLustreSpec(in, out, s) } -func autoConvert_v1alpha2_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreStatus(in *NnfStorageLustreStatus, out *v1alpha3.NnfStorageLustreStatus, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfStorageLustreStatus_To_v1alpha4_NnfStorageLustreStatus(in *NnfStorageLustreStatus, out *v1alpha4.NnfStorageLustreStatus, s conversion.Scope) error { out.MgsAddress = in.MgsAddress out.FileSystemName = in.FileSystemName out.LustreMgtReference = in.LustreMgtReference return nil } -// Convert_v1alpha2_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreStatus is an autogenerated conversion function. -func Convert_v1alpha2_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreStatus(in *NnfStorageLustreStatus, out *v1alpha3.NnfStorageLustreStatus, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreStatus(in, out, s) +// Convert_v1alpha2_NnfStorageLustreStatus_To_v1alpha4_NnfStorageLustreStatus is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfStorageLustreStatus_To_v1alpha4_NnfStorageLustreStatus(in *NnfStorageLustreStatus, out *v1alpha4.NnfStorageLustreStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageLustreStatus_To_v1alpha4_NnfStorageLustreStatus(in, out, s) } -func autoConvert_v1alpha3_NnfStorageLustreStatus_To_v1alpha2_NnfStorageLustreStatus(in *v1alpha3.NnfStorageLustreStatus, out *NnfStorageLustreStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageLustreStatus_To_v1alpha2_NnfStorageLustreStatus(in *v1alpha4.NnfStorageLustreStatus, out *NnfStorageLustreStatus, s conversion.Scope) error { out.MgsAddress = in.MgsAddress out.FileSystemName = in.FileSystemName out.LustreMgtReference = in.LustreMgtReference return nil } -// Convert_v1alpha3_NnfStorageLustreStatus_To_v1alpha2_NnfStorageLustreStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorageLustreStatus_To_v1alpha2_NnfStorageLustreStatus(in *v1alpha3.NnfStorageLustreStatus, out *NnfStorageLustreStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageLustreStatus_To_v1alpha2_NnfStorageLustreStatus(in, out, s) +// Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha2_NnfStorageLustreStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha2_NnfStorageLustreStatus(in *v1alpha4.NnfStorageLustreStatus, out *NnfStorageLustreStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageLustreStatus_To_v1alpha2_NnfStorageLustreStatus(in, out, s) } -func autoConvert_v1alpha2_NnfStorageProfile_To_v1alpha3_NnfStorageProfile(in *NnfStorageProfile, out *v1alpha3.NnfStorageProfile, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(in *NnfStorageProfile, out *v1alpha4.NnfStorageProfile, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha2_NnfStorageProfileData_To_v1alpha3_NnfStorageProfileData(&in.Data, &out.Data, s); err != nil { + if err := Convert_v1alpha2_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData(&in.Data, &out.Data, s); err != nil { return err } return nil } -// Convert_v1alpha2_NnfStorageProfile_To_v1alpha3_NnfStorageProfile is an autogenerated conversion function. -func Convert_v1alpha2_NnfStorageProfile_To_v1alpha3_NnfStorageProfile(in *NnfStorageProfile, out *v1alpha3.NnfStorageProfile, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfStorageProfile_To_v1alpha3_NnfStorageProfile(in, out, s) +// Convert_v1alpha2_NnfStorageProfile_To_v1alpha4_NnfStorageProfile is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(in *NnfStorageProfile, out *v1alpha4.NnfStorageProfile, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(in, out, s) } -func autoConvert_v1alpha3_NnfStorageProfile_To_v1alpha2_NnfStorageProfile(in *v1alpha3.NnfStorageProfile, out *NnfStorageProfile, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageProfile_To_v1alpha2_NnfStorageProfile(in *v1alpha4.NnfStorageProfile, out *NnfStorageProfile, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfStorageProfileData_To_v1alpha2_NnfStorageProfileData(&in.Data, &out.Data, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileData_To_v1alpha2_NnfStorageProfileData(&in.Data, &out.Data, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfStorageProfile_To_v1alpha2_NnfStorageProfile is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorageProfile_To_v1alpha2_NnfStorageProfile(in *v1alpha3.NnfStorageProfile, out *NnfStorageProfile, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageProfile_To_v1alpha2_NnfStorageProfile(in, out, s) +// Convert_v1alpha4_NnfStorageProfile_To_v1alpha2_NnfStorageProfile is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageProfile_To_v1alpha2_NnfStorageProfile(in *v1alpha4.NnfStorageProfile, out *NnfStorageProfile, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfile_To_v1alpha2_NnfStorageProfile(in, out, s) } -func autoConvert_v1alpha2_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(in *NnfStorageProfileCmdLines, out *v1alpha3.NnfStorageProfileCmdLines, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(in *NnfStorageProfileCmdLines, out *v1alpha4.NnfStorageProfileCmdLines, s conversion.Scope) error { out.Mkfs = in.Mkfs out.SharedVg = in.SharedVg out.PvCreate = in.PvCreate out.PvRemove = in.PvRemove out.VgCreate = in.VgCreate - if err := Convert_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines(&in.VgChange, &out.VgChange, s); err != nil { + if err := Convert_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines(&in.VgChange, &out.VgChange, s); err != nil { return err } out.VgRemove = in.VgRemove out.LvCreate = in.LvCreate - if err := Convert_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines(&in.LvChange, &out.LvChange, s); err != nil { + if err := Convert_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines(&in.LvChange, &out.LvChange, s); err != nil { return err } out.LvRemove = in.LvRemove @@ -2662,23 +2662,23 @@ func autoConvert_v1alpha2_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfil return nil } -// Convert_v1alpha2_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines is an autogenerated conversion function. 
-func Convert_v1alpha2_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(in *NnfStorageProfileCmdLines, out *v1alpha3.NnfStorageProfileCmdLines, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(in, out, s) +// Convert_v1alpha2_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines is an autogenerated conversion function. +func Convert_v1alpha2_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(in *NnfStorageProfileCmdLines, out *v1alpha4.NnfStorageProfileCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(in, out, s) } -func autoConvert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfileCmdLines(in *v1alpha3.NnfStorageProfileCmdLines, out *NnfStorageProfileCmdLines, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfileCmdLines(in *v1alpha4.NnfStorageProfileCmdLines, out *NnfStorageProfileCmdLines, s conversion.Scope) error { out.Mkfs = in.Mkfs out.SharedVg = in.SharedVg out.PvCreate = in.PvCreate out.PvRemove = in.PvRemove out.VgCreate = in.VgCreate - if err := Convert_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines(&in.VgChange, &out.VgChange, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines(&in.VgChange, &out.VgChange, s); err != nil { return err } out.VgRemove = in.VgRemove out.LvCreate = in.LvCreate - if err := Convert_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines(&in.LvChange, &out.LvChange, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines(&in.LvChange, &out.LvChange, s); err != nil { return err } out.LvRemove = in.LvRemove @@ -2689,54 +2689,54 @@ 
func autoConvert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfil return nil } -func autoConvert_v1alpha2_NnfStorageProfileData_To_v1alpha3_NnfStorageProfileData(in *NnfStorageProfileData, out *v1alpha3.NnfStorageProfileData, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData(in *NnfStorageProfileData, out *v1alpha4.NnfStorageProfileData, s conversion.Scope) error { out.Default = in.Default out.Pinned = in.Pinned - if err := Convert_v1alpha2_NnfStorageProfileLustreData_To_v1alpha3_NnfStorageProfileLustreData(&in.LustreStorage, &out.LustreStorage, s); err != nil { + if err := Convert_v1alpha2_NnfStorageProfileLustreData_To_v1alpha4_NnfStorageProfileLustreData(&in.LustreStorage, &out.LustreStorage, s); err != nil { return err } - if err := Convert_v1alpha2_NnfStorageProfileGFS2Data_To_v1alpha3_NnfStorageProfileGFS2Data(&in.GFS2Storage, &out.GFS2Storage, s); err != nil { + if err := Convert_v1alpha2_NnfStorageProfileGFS2Data_To_v1alpha4_NnfStorageProfileGFS2Data(&in.GFS2Storage, &out.GFS2Storage, s); err != nil { return err } - if err := Convert_v1alpha2_NnfStorageProfileXFSData_To_v1alpha3_NnfStorageProfileXFSData(&in.XFSStorage, &out.XFSStorage, s); err != nil { + if err := Convert_v1alpha2_NnfStorageProfileXFSData_To_v1alpha4_NnfStorageProfileXFSData(&in.XFSStorage, &out.XFSStorage, s); err != nil { return err } - if err := Convert_v1alpha2_NnfStorageProfileRawData_To_v1alpha3_NnfStorageProfileRawData(&in.RawStorage, &out.RawStorage, s); err != nil { + if err := Convert_v1alpha2_NnfStorageProfileRawData_To_v1alpha4_NnfStorageProfileRawData(&in.RawStorage, &out.RawStorage, s); err != nil { return err } return nil } -// Convert_v1alpha2_NnfStorageProfileData_To_v1alpha3_NnfStorageProfileData is an autogenerated conversion function. 
-func Convert_v1alpha2_NnfStorageProfileData_To_v1alpha3_NnfStorageProfileData(in *NnfStorageProfileData, out *v1alpha3.NnfStorageProfileData, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfStorageProfileData_To_v1alpha3_NnfStorageProfileData(in, out, s) +// Convert_v1alpha2_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData is an autogenerated conversion function. +func Convert_v1alpha2_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData(in *NnfStorageProfileData, out *v1alpha4.NnfStorageProfileData, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData(in, out, s) } -func autoConvert_v1alpha3_NnfStorageProfileData_To_v1alpha2_NnfStorageProfileData(in *v1alpha3.NnfStorageProfileData, out *NnfStorageProfileData, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageProfileData_To_v1alpha2_NnfStorageProfileData(in *v1alpha4.NnfStorageProfileData, out *NnfStorageProfileData, s conversion.Scope) error { out.Default = in.Default out.Pinned = in.Pinned - if err := Convert_v1alpha3_NnfStorageProfileLustreData_To_v1alpha2_NnfStorageProfileLustreData(&in.LustreStorage, &out.LustreStorage, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileLustreData_To_v1alpha2_NnfStorageProfileLustreData(&in.LustreStorage, &out.LustreStorage, s); err != nil { return err } - if err := Convert_v1alpha3_NnfStorageProfileGFS2Data_To_v1alpha2_NnfStorageProfileGFS2Data(&in.GFS2Storage, &out.GFS2Storage, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileGFS2Data_To_v1alpha2_NnfStorageProfileGFS2Data(&in.GFS2Storage, &out.GFS2Storage, s); err != nil { return err } - if err := Convert_v1alpha3_NnfStorageProfileXFSData_To_v1alpha2_NnfStorageProfileXFSData(&in.XFSStorage, &out.XFSStorage, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileXFSData_To_v1alpha2_NnfStorageProfileXFSData(&in.XFSStorage, &out.XFSStorage, s); err != nil { return err } - if err := 
Convert_v1alpha3_NnfStorageProfileRawData_To_v1alpha2_NnfStorageProfileRawData(&in.RawStorage, &out.RawStorage, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileRawData_To_v1alpha2_NnfStorageProfileRawData(&in.RawStorage, &out.RawStorage, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfStorageProfileData_To_v1alpha2_NnfStorageProfileData is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorageProfileData_To_v1alpha2_NnfStorageProfileData(in *v1alpha3.NnfStorageProfileData, out *NnfStorageProfileData, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageProfileData_To_v1alpha2_NnfStorageProfileData(in, out, s) +// Convert_v1alpha4_NnfStorageProfileData_To_v1alpha2_NnfStorageProfileData is an autogenerated conversion function. +func Convert_v1alpha4_NnfStorageProfileData_To_v1alpha2_NnfStorageProfileData(in *v1alpha4.NnfStorageProfileData, out *NnfStorageProfileData, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileData_To_v1alpha2_NnfStorageProfileData(in, out, s) } -func autoConvert_v1alpha2_NnfStorageProfileGFS2Data_To_v1alpha3_NnfStorageProfileGFS2Data(in *NnfStorageProfileGFS2Data, out *v1alpha3.NnfStorageProfileGFS2Data, s conversion.Scope) error { - if err := Convert_v1alpha2_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { +func autoConvert_v1alpha2_NnfStorageProfileGFS2Data_To_v1alpha4_NnfStorageProfileGFS2Data(in *NnfStorageProfileGFS2Data, out *v1alpha4.NnfStorageProfileGFS2Data, s conversion.Scope) error { + if err := Convert_v1alpha2_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { return err } out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) @@ -2744,13 +2744,13 @@ func autoConvert_v1alpha2_NnfStorageProfileGFS2Data_To_v1alpha3_NnfStorageProfil return nil } -// 
Convert_v1alpha2_NnfStorageProfileGFS2Data_To_v1alpha3_NnfStorageProfileGFS2Data is an autogenerated conversion function. -func Convert_v1alpha2_NnfStorageProfileGFS2Data_To_v1alpha3_NnfStorageProfileGFS2Data(in *NnfStorageProfileGFS2Data, out *v1alpha3.NnfStorageProfileGFS2Data, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfStorageProfileGFS2Data_To_v1alpha3_NnfStorageProfileGFS2Data(in, out, s) +// Convert_v1alpha2_NnfStorageProfileGFS2Data_To_v1alpha4_NnfStorageProfileGFS2Data is an autogenerated conversion function. +func Convert_v1alpha2_NnfStorageProfileGFS2Data_To_v1alpha4_NnfStorageProfileGFS2Data(in *NnfStorageProfileGFS2Data, out *v1alpha4.NnfStorageProfileGFS2Data, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageProfileGFS2Data_To_v1alpha4_NnfStorageProfileGFS2Data(in, out, s) } -func autoConvert_v1alpha3_NnfStorageProfileGFS2Data_To_v1alpha2_NnfStorageProfileGFS2Data(in *v1alpha3.NnfStorageProfileGFS2Data, out *NnfStorageProfileGFS2Data, s conversion.Scope) error { - if err := Convert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { +func autoConvert_v1alpha4_NnfStorageProfileGFS2Data_To_v1alpha2_NnfStorageProfileGFS2Data(in *v1alpha4.NnfStorageProfileGFS2Data, out *NnfStorageProfileGFS2Data, s conversion.Scope) error { + if err := Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { return err } out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) @@ -2758,62 +2758,62 @@ func autoConvert_v1alpha3_NnfStorageProfileGFS2Data_To_v1alpha2_NnfStorageProfil return nil } -// Convert_v1alpha3_NnfStorageProfileGFS2Data_To_v1alpha2_NnfStorageProfileGFS2Data is an autogenerated conversion function. 
-func Convert_v1alpha3_NnfStorageProfileGFS2Data_To_v1alpha2_NnfStorageProfileGFS2Data(in *v1alpha3.NnfStorageProfileGFS2Data, out *NnfStorageProfileGFS2Data, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageProfileGFS2Data_To_v1alpha2_NnfStorageProfileGFS2Data(in, out, s) +// Convert_v1alpha4_NnfStorageProfileGFS2Data_To_v1alpha2_NnfStorageProfileGFS2Data is an autogenerated conversion function. +func Convert_v1alpha4_NnfStorageProfileGFS2Data_To_v1alpha2_NnfStorageProfileGFS2Data(in *v1alpha4.NnfStorageProfileGFS2Data, out *NnfStorageProfileGFS2Data, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileGFS2Data_To_v1alpha2_NnfStorageProfileGFS2Data(in, out, s) } -func autoConvert_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines(in *NnfStorageProfileLVMLvChangeCmdLines, out *v1alpha3.NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines(in *NnfStorageProfileLVMLvChangeCmdLines, out *v1alpha4.NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { out.Activate = in.Activate out.Deactivate = in.Deactivate return nil } -// Convert_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines is an autogenerated conversion function. -func Convert_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines(in *NnfStorageProfileLVMLvChangeCmdLines, out *v1alpha3.NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines(in, out, s) +// Convert_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines(in *NnfStorageProfileLVMLvChangeCmdLines, out *v1alpha4.NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines(in, out, s) } -func autoConvert_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines(in *v1alpha3.NnfStorageProfileLVMLvChangeCmdLines, out *NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines(in *v1alpha4.NnfStorageProfileLVMLvChangeCmdLines, out *NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { out.Activate = in.Activate out.Deactivate = in.Deactivate return nil } -// Convert_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines(in *v1alpha3.NnfStorageProfileLVMLvChangeCmdLines, out *NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines(in, out, s) +// Convert_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines(in *v1alpha4.NnfStorageProfileLVMLvChangeCmdLines, out *NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMLvChangeCmdLines(in, out, s) } -func autoConvert_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines(in *NnfStorageProfileLVMVgChangeCmdLines, out *v1alpha3.NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines(in *NnfStorageProfileLVMVgChangeCmdLines, out *v1alpha4.NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { out.LockStart = in.LockStart out.LockStop = in.LockStop return nil } -// Convert_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines is an autogenerated conversion function. -func Convert_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines(in *NnfStorageProfileLVMVgChangeCmdLines, out *v1alpha3.NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines(in, out, s) +// Convert_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines(in *NnfStorageProfileLVMVgChangeCmdLines, out *v1alpha4.NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines(in, out, s) } -func autoConvert_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines(in *v1alpha3.NnfStorageProfileLVMVgChangeCmdLines, out *NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines(in *v1alpha4.NnfStorageProfileLVMVgChangeCmdLines, out *NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { out.LockStart = in.LockStart out.LockStop = in.LockStop return nil } -// Convert_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines(in *v1alpha3.NnfStorageProfileLVMVgChangeCmdLines, out *NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines(in, out, s) +// Convert_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines(in *v1alpha4.NnfStorageProfileLVMVgChangeCmdLines, out *NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha2_NnfStorageProfileLVMVgChangeCmdLines(in, out, s) } -func autoConvert_v1alpha2_NnfStorageProfileList_To_v1alpha3_NnfStorageProfileList(in *NnfStorageProfileList, out *v1alpha3.NnfStorageProfileList, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfStorageProfileList_To_v1alpha4_NnfStorageProfileList(in *NnfStorageProfileList, out *v1alpha4.NnfStorageProfileList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]v1alpha3.NnfStorageProfile, len(*in)) + *out = make([]v1alpha4.NnfStorageProfile, len(*in)) for i := range *in { - if err := Convert_v1alpha2_NnfStorageProfile_To_v1alpha3_NnfStorageProfile(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1alpha2_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -2823,18 +2823,18 @@ func autoConvert_v1alpha2_NnfStorageProfileList_To_v1alpha3_NnfStorageProfileLis return nil } -// Convert_v1alpha2_NnfStorageProfileList_To_v1alpha3_NnfStorageProfileList is an autogenerated conversion function. -func Convert_v1alpha2_NnfStorageProfileList_To_v1alpha3_NnfStorageProfileList(in *NnfStorageProfileList, out *v1alpha3.NnfStorageProfileList, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfStorageProfileList_To_v1alpha3_NnfStorageProfileList(in, out, s) +// Convert_v1alpha2_NnfStorageProfileList_To_v1alpha4_NnfStorageProfileList is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfStorageProfileList_To_v1alpha4_NnfStorageProfileList(in *NnfStorageProfileList, out *v1alpha4.NnfStorageProfileList, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageProfileList_To_v1alpha4_NnfStorageProfileList(in, out, s) } -func autoConvert_v1alpha3_NnfStorageProfileList_To_v1alpha2_NnfStorageProfileList(in *v1alpha3.NnfStorageProfileList, out *NnfStorageProfileList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageProfileList_To_v1alpha2_NnfStorageProfileList(in *v1alpha4.NnfStorageProfileList, out *NnfStorageProfileList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]NnfStorageProfile, len(*in)) for i := range *in { - if err := Convert_v1alpha3_NnfStorageProfile_To_v1alpha2_NnfStorageProfile(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfile_To_v1alpha2_NnfStorageProfile(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -2844,24 +2844,24 @@ func autoConvert_v1alpha3_NnfStorageProfileList_To_v1alpha2_NnfStorageProfileLis return nil } -// Convert_v1alpha3_NnfStorageProfileList_To_v1alpha2_NnfStorageProfileList is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorageProfileList_To_v1alpha2_NnfStorageProfileList(in *v1alpha3.NnfStorageProfileList, out *NnfStorageProfileList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageProfileList_To_v1alpha2_NnfStorageProfileList(in, out, s) +// Convert_v1alpha4_NnfStorageProfileList_To_v1alpha2_NnfStorageProfileList is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageProfileList_To_v1alpha2_NnfStorageProfileList(in *v1alpha4.NnfStorageProfileList, out *NnfStorageProfileList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileList_To_v1alpha2_NnfStorageProfileList(in, out, s) } -func autoConvert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(in *NnfStorageProfileLustreCmdLines, out *v1alpha3.NnfStorageProfileLustreCmdLines, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(in *NnfStorageProfileLustreCmdLines, out *v1alpha4.NnfStorageProfileLustreCmdLines, s conversion.Scope) error { out.ZpoolCreate = in.ZpoolCreate out.Mkfs = in.Mkfs out.MountTarget = in.MountTarget return nil } -// Convert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines is an autogenerated conversion function. -func Convert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(in *NnfStorageProfileLustreCmdLines, out *v1alpha3.NnfStorageProfileLustreCmdLines, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(in, out, s) +// Convert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(in *NnfStorageProfileLustreCmdLines, out *v1alpha4.NnfStorageProfileLustreCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(in, out, s) } -func autoConvert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines(in *v1alpha3.NnfStorageProfileLustreCmdLines, out *NnfStorageProfileLustreCmdLines, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines(in *v1alpha4.NnfStorageProfileLustreCmdLines, out *NnfStorageProfileLustreCmdLines, s conversion.Scope) error { out.ZpoolCreate = in.ZpoolCreate out.Mkfs = in.Mkfs out.MountTarget = in.MountTarget @@ -2870,7 +2870,7 @@ func autoConvert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorage return nil } -func autoConvert_v1alpha2_NnfStorageProfileLustreData_To_v1alpha3_NnfStorageProfileLustreData(in *NnfStorageProfileLustreData, out *v1alpha3.NnfStorageProfileLustreData, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfStorageProfileLustreData_To_v1alpha4_NnfStorageProfileLustreData(in *NnfStorageProfileLustreData, out *v1alpha4.NnfStorageProfileLustreData, s conversion.Scope) error { out.CombinedMGTMDT = in.CombinedMGTMDT out.ExternalMGS = in.ExternalMGS out.CapacityMGT = in.CapacityMGT @@ -2878,28 +2878,28 @@ func autoConvert_v1alpha2_NnfStorageProfileLustreData_To_v1alpha3_NnfStorageProf out.ExclusiveMDT = in.ExclusiveMDT out.CapacityScalingFactor = in.CapacityScalingFactor out.StandaloneMGTPoolName = in.StandaloneMGTPoolName - if err := Convert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(&in.MgtCmdLines, &out.MgtCmdLines, s); err != nil { + if err := Convert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(&in.MgtCmdLines, 
&out.MgtCmdLines, s); err != nil { return err } - if err := Convert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(&in.MdtCmdLines, &out.MdtCmdLines, s); err != nil { + if err := Convert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(&in.MdtCmdLines, &out.MdtCmdLines, s); err != nil { return err } - if err := Convert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(&in.MgtMdtCmdLines, &out.MgtMdtCmdLines, s); err != nil { + if err := Convert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(&in.MgtMdtCmdLines, &out.MgtMdtCmdLines, s); err != nil { return err } - if err := Convert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(&in.OstCmdLines, &out.OstCmdLines, s); err != nil { + if err := Convert_v1alpha2_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(&in.OstCmdLines, &out.OstCmdLines, s); err != nil { return err } - if err := Convert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions(&in.MgtOptions, &out.MgtOptions, s); err != nil { + if err := Convert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(&in.MgtOptions, &out.MgtOptions, s); err != nil { return err } - if err := Convert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions(&in.MdtOptions, &out.MdtOptions, s); err != nil { + if err := Convert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(&in.MdtOptions, &out.MdtOptions, s); err != nil { return err } - if err := Convert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions(&in.MgtMdtOptions, &out.MgtMdtOptions, s); err != nil { + if err := 
Convert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(&in.MgtMdtOptions, &out.MgtMdtOptions, s); err != nil { return err } - if err := Convert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions(&in.OstOptions, &out.OstOptions, s); err != nil { + if err := Convert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(&in.OstOptions, &out.OstOptions, s); err != nil { return err } out.MountRabbit = in.MountRabbit @@ -2907,12 +2907,12 @@ func autoConvert_v1alpha2_NnfStorageProfileLustreData_To_v1alpha3_NnfStorageProf return nil } -// Convert_v1alpha2_NnfStorageProfileLustreData_To_v1alpha3_NnfStorageProfileLustreData is an autogenerated conversion function. -func Convert_v1alpha2_NnfStorageProfileLustreData_To_v1alpha3_NnfStorageProfileLustreData(in *NnfStorageProfileLustreData, out *v1alpha3.NnfStorageProfileLustreData, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfStorageProfileLustreData_To_v1alpha3_NnfStorageProfileLustreData(in, out, s) +// Convert_v1alpha2_NnfStorageProfileLustreData_To_v1alpha4_NnfStorageProfileLustreData is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfStorageProfileLustreData_To_v1alpha4_NnfStorageProfileLustreData(in *NnfStorageProfileLustreData, out *v1alpha4.NnfStorageProfileLustreData, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageProfileLustreData_To_v1alpha4_NnfStorageProfileLustreData(in, out, s) } -func autoConvert_v1alpha3_NnfStorageProfileLustreData_To_v1alpha2_NnfStorageProfileLustreData(in *v1alpha3.NnfStorageProfileLustreData, out *NnfStorageProfileLustreData, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageProfileLustreData_To_v1alpha2_NnfStorageProfileLustreData(in *v1alpha4.NnfStorageProfileLustreData, out *NnfStorageProfileLustreData, s conversion.Scope) error { out.CombinedMGTMDT = in.CombinedMGTMDT out.ExternalMGS = in.ExternalMGS out.CapacityMGT = in.CapacityMGT @@ -2920,28 +2920,28 @@ func autoConvert_v1alpha3_NnfStorageProfileLustreData_To_v1alpha2_NnfStorageProf out.ExclusiveMDT = in.ExclusiveMDT out.CapacityScalingFactor = in.CapacityScalingFactor out.StandaloneMGTPoolName = in.StandaloneMGTPoolName - if err := Convert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines(&in.MgtCmdLines, &out.MgtCmdLines, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines(&in.MgtCmdLines, &out.MgtCmdLines, s); err != nil { return err } - if err := Convert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines(&in.MdtCmdLines, &out.MdtCmdLines, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines(&in.MdtCmdLines, &out.MdtCmdLines, s); err != nil { return err } - if err := Convert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines(&in.MgtMdtCmdLines, &out.MgtMdtCmdLines, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines(&in.MgtMdtCmdLines, 
&out.MgtMdtCmdLines, s); err != nil { return err } - if err := Convert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines(&in.OstCmdLines, &out.OstCmdLines, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines(&in.OstCmdLines, &out.OstCmdLines, s); err != nil { return err } - if err := Convert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions(&in.MgtOptions, &out.MgtOptions, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions(&in.MgtOptions, &out.MgtOptions, s); err != nil { return err } - if err := Convert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions(&in.MdtOptions, &out.MdtOptions, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions(&in.MdtOptions, &out.MdtOptions, s); err != nil { return err } - if err := Convert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions(&in.MgtMdtOptions, &out.MgtMdtOptions, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions(&in.MgtMdtOptions, &out.MgtMdtOptions, s); err != nil { return err } - if err := Convert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions(&in.OstOptions, &out.OstOptions, s); err != nil { + if err := Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions(&in.OstOptions, &out.OstOptions, s); err != nil { return err } out.MountRabbit = in.MountRabbit @@ -2949,12 +2949,12 @@ func autoConvert_v1alpha3_NnfStorageProfileLustreData_To_v1alpha2_NnfStorageProf return nil } -// Convert_v1alpha3_NnfStorageProfileLustreData_To_v1alpha2_NnfStorageProfileLustreData is an autogenerated 
conversion function. -func Convert_v1alpha3_NnfStorageProfileLustreData_To_v1alpha2_NnfStorageProfileLustreData(in *v1alpha3.NnfStorageProfileLustreData, out *NnfStorageProfileLustreData, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageProfileLustreData_To_v1alpha2_NnfStorageProfileLustreData(in, out, s) +// Convert_v1alpha4_NnfStorageProfileLustreData_To_v1alpha2_NnfStorageProfileLustreData is an autogenerated conversion function. +func Convert_v1alpha4_NnfStorageProfileLustreData_To_v1alpha2_NnfStorageProfileLustreData(in *v1alpha4.NnfStorageProfileLustreData, out *NnfStorageProfileLustreData, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileLustreData_To_v1alpha2_NnfStorageProfileLustreData(in, out, s) } -func autoConvert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions(in *NnfStorageProfileLustreMiscOptions, out *v1alpha3.NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(in *NnfStorageProfileLustreMiscOptions, out *v1alpha4.NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { out.ColocateComputes = in.ColocateComputes out.Count = in.Count out.Scale = in.Scale @@ -2962,12 +2962,12 @@ func autoConvert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStor return nil } -// Convert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions is an autogenerated conversion function. 
-func Convert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions(in *NnfStorageProfileLustreMiscOptions, out *v1alpha3.NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions(in, out, s) +// Convert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions is an autogenerated conversion function. +func Convert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(in *NnfStorageProfileLustreMiscOptions, out *v1alpha4.NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(in, out, s) } -func autoConvert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions(in *v1alpha3.NnfStorageProfileLustreMiscOptions, out *NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions(in *v1alpha4.NnfStorageProfileLustreMiscOptions, out *NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { out.ColocateComputes = in.ColocateComputes out.Count = in.Count out.Scale = in.Scale @@ -2975,13 +2975,13 @@ func autoConvert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStor return nil } -// Convert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions is an autogenerated conversion function. 
-func Convert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions(in *v1alpha3.NnfStorageProfileLustreMiscOptions, out *NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions(in, out, s) +// Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions is an autogenerated conversion function. +func Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions(in *v1alpha4.NnfStorageProfileLustreMiscOptions, out *NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha2_NnfStorageProfileLustreMiscOptions(in, out, s) } -func autoConvert_v1alpha2_NnfStorageProfileRawData_To_v1alpha3_NnfStorageProfileRawData(in *NnfStorageProfileRawData, out *v1alpha3.NnfStorageProfileRawData, s conversion.Scope) error { - if err := Convert_v1alpha2_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { +func autoConvert_v1alpha2_NnfStorageProfileRawData_To_v1alpha4_NnfStorageProfileRawData(in *NnfStorageProfileRawData, out *v1alpha4.NnfStorageProfileRawData, s conversion.Scope) error { + if err := Convert_v1alpha2_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { return err } out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) @@ -2989,13 +2989,13 @@ func autoConvert_v1alpha2_NnfStorageProfileRawData_To_v1alpha3_NnfStorageProfile return nil } -// Convert_v1alpha2_NnfStorageProfileRawData_To_v1alpha3_NnfStorageProfileRawData is an autogenerated conversion function. 
-func Convert_v1alpha2_NnfStorageProfileRawData_To_v1alpha3_NnfStorageProfileRawData(in *NnfStorageProfileRawData, out *v1alpha3.NnfStorageProfileRawData, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfStorageProfileRawData_To_v1alpha3_NnfStorageProfileRawData(in, out, s) +// Convert_v1alpha2_NnfStorageProfileRawData_To_v1alpha4_NnfStorageProfileRawData is an autogenerated conversion function. +func Convert_v1alpha2_NnfStorageProfileRawData_To_v1alpha4_NnfStorageProfileRawData(in *NnfStorageProfileRawData, out *v1alpha4.NnfStorageProfileRawData, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageProfileRawData_To_v1alpha4_NnfStorageProfileRawData(in, out, s) } -func autoConvert_v1alpha3_NnfStorageProfileRawData_To_v1alpha2_NnfStorageProfileRawData(in *v1alpha3.NnfStorageProfileRawData, out *NnfStorageProfileRawData, s conversion.Scope) error { - if err := Convert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { +func autoConvert_v1alpha4_NnfStorageProfileRawData_To_v1alpha2_NnfStorageProfileRawData(in *v1alpha4.NnfStorageProfileRawData, out *NnfStorageProfileRawData, s conversion.Scope) error { + if err := Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { return err } out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) @@ -3003,13 +3003,13 @@ func autoConvert_v1alpha3_NnfStorageProfileRawData_To_v1alpha2_NnfStorageProfile return nil } -// Convert_v1alpha3_NnfStorageProfileRawData_To_v1alpha2_NnfStorageProfileRawData is an autogenerated conversion function. 
-func Convert_v1alpha3_NnfStorageProfileRawData_To_v1alpha2_NnfStorageProfileRawData(in *v1alpha3.NnfStorageProfileRawData, out *NnfStorageProfileRawData, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageProfileRawData_To_v1alpha2_NnfStorageProfileRawData(in, out, s) +// Convert_v1alpha4_NnfStorageProfileRawData_To_v1alpha2_NnfStorageProfileRawData is an autogenerated conversion function. +func Convert_v1alpha4_NnfStorageProfileRawData_To_v1alpha2_NnfStorageProfileRawData(in *v1alpha4.NnfStorageProfileRawData, out *NnfStorageProfileRawData, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileRawData_To_v1alpha2_NnfStorageProfileRawData(in, out, s) } -func autoConvert_v1alpha2_NnfStorageProfileXFSData_To_v1alpha3_NnfStorageProfileXFSData(in *NnfStorageProfileXFSData, out *v1alpha3.NnfStorageProfileXFSData, s conversion.Scope) error { - if err := Convert_v1alpha2_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { +func autoConvert_v1alpha2_NnfStorageProfileXFSData_To_v1alpha4_NnfStorageProfileXFSData(in *NnfStorageProfileXFSData, out *v1alpha4.NnfStorageProfileXFSData, s conversion.Scope) error { + if err := Convert_v1alpha2_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { return err } out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) @@ -3017,13 +3017,13 @@ func autoConvert_v1alpha2_NnfStorageProfileXFSData_To_v1alpha3_NnfStorageProfile return nil } -// Convert_v1alpha2_NnfStorageProfileXFSData_To_v1alpha3_NnfStorageProfileXFSData is an autogenerated conversion function. 
-func Convert_v1alpha2_NnfStorageProfileXFSData_To_v1alpha3_NnfStorageProfileXFSData(in *NnfStorageProfileXFSData, out *v1alpha3.NnfStorageProfileXFSData, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfStorageProfileXFSData_To_v1alpha3_NnfStorageProfileXFSData(in, out, s) +// Convert_v1alpha2_NnfStorageProfileXFSData_To_v1alpha4_NnfStorageProfileXFSData is an autogenerated conversion function. +func Convert_v1alpha2_NnfStorageProfileXFSData_To_v1alpha4_NnfStorageProfileXFSData(in *NnfStorageProfileXFSData, out *v1alpha4.NnfStorageProfileXFSData, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageProfileXFSData_To_v1alpha4_NnfStorageProfileXFSData(in, out, s) } -func autoConvert_v1alpha3_NnfStorageProfileXFSData_To_v1alpha2_NnfStorageProfileXFSData(in *v1alpha3.NnfStorageProfileXFSData, out *NnfStorageProfileXFSData, s conversion.Scope) error { - if err := Convert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { +func autoConvert_v1alpha4_NnfStorageProfileXFSData_To_v1alpha2_NnfStorageProfileXFSData(in *v1alpha4.NnfStorageProfileXFSData, out *NnfStorageProfileXFSData, s conversion.Scope) error { + if err := Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { return err } out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) @@ -3031,25 +3031,25 @@ func autoConvert_v1alpha3_NnfStorageProfileXFSData_To_v1alpha2_NnfStorageProfile return nil } -// Convert_v1alpha3_NnfStorageProfileXFSData_To_v1alpha2_NnfStorageProfileXFSData is an autogenerated conversion function. 
-func Convert_v1alpha3_NnfStorageProfileXFSData_To_v1alpha2_NnfStorageProfileXFSData(in *v1alpha3.NnfStorageProfileXFSData, out *NnfStorageProfileXFSData, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageProfileXFSData_To_v1alpha2_NnfStorageProfileXFSData(in, out, s) +// Convert_v1alpha4_NnfStorageProfileXFSData_To_v1alpha2_NnfStorageProfileXFSData is an autogenerated conversion function. +func Convert_v1alpha4_NnfStorageProfileXFSData_To_v1alpha2_NnfStorageProfileXFSData(in *v1alpha4.NnfStorageProfileXFSData, out *NnfStorageProfileXFSData, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileXFSData_To_v1alpha2_NnfStorageProfileXFSData(in, out, s) } -func autoConvert_v1alpha2_NnfStorageSpec_To_v1alpha3_NnfStorageSpec(in *NnfStorageSpec, out *v1alpha3.NnfStorageSpec, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfStorageSpec_To_v1alpha4_NnfStorageSpec(in *NnfStorageSpec, out *v1alpha4.NnfStorageSpec, s conversion.Scope) error { out.FileSystemType = in.FileSystemType out.UserID = in.UserID out.GroupID = in.GroupID - out.AllocationSets = *(*[]v1alpha3.NnfStorageAllocationSetSpec)(unsafe.Pointer(&in.AllocationSets)) + out.AllocationSets = *(*[]v1alpha4.NnfStorageAllocationSetSpec)(unsafe.Pointer(&in.AllocationSets)) return nil } -// Convert_v1alpha2_NnfStorageSpec_To_v1alpha3_NnfStorageSpec is an autogenerated conversion function. -func Convert_v1alpha2_NnfStorageSpec_To_v1alpha3_NnfStorageSpec(in *NnfStorageSpec, out *v1alpha3.NnfStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfStorageSpec_To_v1alpha3_NnfStorageSpec(in, out, s) +// Convert_v1alpha2_NnfStorageSpec_To_v1alpha4_NnfStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfStorageSpec_To_v1alpha4_NnfStorageSpec(in *NnfStorageSpec, out *v1alpha4.NnfStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageSpec_To_v1alpha4_NnfStorageSpec(in, out, s) } -func autoConvert_v1alpha3_NnfStorageSpec_To_v1alpha2_NnfStorageSpec(in *v1alpha3.NnfStorageSpec, out *NnfStorageSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfStorageSpec_To_v1alpha2_NnfStorageSpec(in *v1alpha4.NnfStorageSpec, out *NnfStorageSpec, s conversion.Scope) error { out.FileSystemType = in.FileSystemType out.UserID = in.UserID out.GroupID = in.GroupID @@ -3057,28 +3057,28 @@ func autoConvert_v1alpha3_NnfStorageSpec_To_v1alpha2_NnfStorageSpec(in *v1alpha3 return nil } -// Convert_v1alpha3_NnfStorageSpec_To_v1alpha2_NnfStorageSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorageSpec_To_v1alpha2_NnfStorageSpec(in *v1alpha3.NnfStorageSpec, out *NnfStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageSpec_To_v1alpha2_NnfStorageSpec(in, out, s) +// Convert_v1alpha4_NnfStorageSpec_To_v1alpha2_NnfStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageSpec_To_v1alpha2_NnfStorageSpec(in *v1alpha4.NnfStorageSpec, out *NnfStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageSpec_To_v1alpha2_NnfStorageSpec(in, out, s) } -func autoConvert_v1alpha2_NnfStorageStatus_To_v1alpha3_NnfStorageStatus(in *NnfStorageStatus, out *v1alpha3.NnfStorageStatus, s conversion.Scope) error { - if err := Convert_v1alpha2_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreStatus(&in.NnfStorageLustreStatus, &out.NnfStorageLustreStatus, s); err != nil { +func autoConvert_v1alpha2_NnfStorageStatus_To_v1alpha4_NnfStorageStatus(in *NnfStorageStatus, out *v1alpha4.NnfStorageStatus, s conversion.Scope) error { + if err := Convert_v1alpha2_NnfStorageLustreStatus_To_v1alpha4_NnfStorageLustreStatus(&in.NnfStorageLustreStatus, &out.NnfStorageLustreStatus, s); err != nil { return err } - out.AllocationSets = *(*[]v1alpha3.NnfStorageAllocationSetStatus)(unsafe.Pointer(&in.AllocationSets)) + out.AllocationSets = *(*[]v1alpha4.NnfStorageAllocationSetStatus)(unsafe.Pointer(&in.AllocationSets)) out.ResourceError = in.ResourceError out.Ready = in.Ready return nil } -// Convert_v1alpha2_NnfStorageStatus_To_v1alpha3_NnfStorageStatus is an autogenerated conversion function. -func Convert_v1alpha2_NnfStorageStatus_To_v1alpha3_NnfStorageStatus(in *NnfStorageStatus, out *v1alpha3.NnfStorageStatus, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfStorageStatus_To_v1alpha3_NnfStorageStatus(in, out, s) +// Convert_v1alpha2_NnfStorageStatus_To_v1alpha4_NnfStorageStatus is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfStorageStatus_To_v1alpha4_NnfStorageStatus(in *NnfStorageStatus, out *v1alpha4.NnfStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfStorageStatus_To_v1alpha4_NnfStorageStatus(in, out, s) } -func autoConvert_v1alpha3_NnfStorageStatus_To_v1alpha2_NnfStorageStatus(in *v1alpha3.NnfStorageStatus, out *NnfStorageStatus, s conversion.Scope) error { - if err := Convert_v1alpha3_NnfStorageLustreStatus_To_v1alpha2_NnfStorageLustreStatus(&in.NnfStorageLustreStatus, &out.NnfStorageLustreStatus, s); err != nil { +func autoConvert_v1alpha4_NnfStorageStatus_To_v1alpha2_NnfStorageStatus(in *v1alpha4.NnfStorageStatus, out *NnfStorageStatus, s conversion.Scope) error { + if err := Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha2_NnfStorageLustreStatus(&in.NnfStorageLustreStatus, &out.NnfStorageLustreStatus, s); err != nil { return err } out.AllocationSets = *(*[]NnfStorageAllocationSetStatus)(unsafe.Pointer(&in.AllocationSets)) @@ -3087,73 +3087,73 @@ func autoConvert_v1alpha3_NnfStorageStatus_To_v1alpha2_NnfStorageStatus(in *v1al return nil } -// Convert_v1alpha3_NnfStorageStatus_To_v1alpha2_NnfStorageStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorageStatus_To_v1alpha2_NnfStorageStatus(in *v1alpha3.NnfStorageStatus, out *NnfStorageStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageStatus_To_v1alpha2_NnfStorageStatus(in, out, s) +// Convert_v1alpha4_NnfStorageStatus_To_v1alpha2_NnfStorageStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageStatus_To_v1alpha2_NnfStorageStatus(in *v1alpha4.NnfStorageStatus, out *NnfStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageStatus_To_v1alpha2_NnfStorageStatus(in, out, s) } -func autoConvert_v1alpha2_NnfSystemStorage_To_v1alpha3_NnfSystemStorage(in *NnfSystemStorage, out *v1alpha3.NnfSystemStorage, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfSystemStorage_To_v1alpha4_NnfSystemStorage(in *NnfSystemStorage, out *v1alpha4.NnfSystemStorage, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha2_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha2_NnfSystemStorageSpec_To_v1alpha4_NnfSystemStorageSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha2_NnfSystemStorageStatus_To_v1alpha3_NnfSystemStorageStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha2_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha2_NnfSystemStorage_To_v1alpha3_NnfSystemStorage is an autogenerated conversion function. -func Convert_v1alpha2_NnfSystemStorage_To_v1alpha3_NnfSystemStorage(in *NnfSystemStorage, out *v1alpha3.NnfSystemStorage, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfSystemStorage_To_v1alpha3_NnfSystemStorage(in, out, s) +// Convert_v1alpha2_NnfSystemStorage_To_v1alpha4_NnfSystemStorage is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfSystemStorage_To_v1alpha4_NnfSystemStorage(in *NnfSystemStorage, out *v1alpha4.NnfSystemStorage, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfSystemStorage_To_v1alpha4_NnfSystemStorage(in, out, s) } -func autoConvert_v1alpha3_NnfSystemStorage_To_v1alpha2_NnfSystemStorage(in *v1alpha3.NnfSystemStorage, out *NnfSystemStorage, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfSystemStorage_To_v1alpha2_NnfSystemStorage(in *v1alpha4.NnfSystemStorage, out *NnfSystemStorage, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1alpha3_NnfSystemStorageStatus_To_v1alpha2_NnfSystemStorageStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1alpha4_NnfSystemStorageStatus_To_v1alpha2_NnfSystemStorageStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1alpha3_NnfSystemStorage_To_v1alpha2_NnfSystemStorage is an autogenerated conversion function. -func Convert_v1alpha3_NnfSystemStorage_To_v1alpha2_NnfSystemStorage(in *v1alpha3.NnfSystemStorage, out *NnfSystemStorage, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfSystemStorage_To_v1alpha2_NnfSystemStorage(in, out, s) +// Convert_v1alpha4_NnfSystemStorage_To_v1alpha2_NnfSystemStorage is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfSystemStorage_To_v1alpha2_NnfSystemStorage(in *v1alpha4.NnfSystemStorage, out *NnfSystemStorage, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfSystemStorage_To_v1alpha2_NnfSystemStorage(in, out, s) } -func autoConvert_v1alpha2_NnfSystemStorageList_To_v1alpha3_NnfSystemStorageList(in *NnfSystemStorageList, out *v1alpha3.NnfSystemStorageList, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfSystemStorageList_To_v1alpha4_NnfSystemStorageList(in *NnfSystemStorageList, out *v1alpha4.NnfSystemStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha3.NnfSystemStorage)(unsafe.Pointer(&in.Items)) + out.Items = *(*[]v1alpha4.NnfSystemStorage)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha2_NnfSystemStorageList_To_v1alpha3_NnfSystemStorageList is an autogenerated conversion function. -func Convert_v1alpha2_NnfSystemStorageList_To_v1alpha3_NnfSystemStorageList(in *NnfSystemStorageList, out *v1alpha3.NnfSystemStorageList, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfSystemStorageList_To_v1alpha3_NnfSystemStorageList(in, out, s) +// Convert_v1alpha2_NnfSystemStorageList_To_v1alpha4_NnfSystemStorageList is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfSystemStorageList_To_v1alpha4_NnfSystemStorageList(in *NnfSystemStorageList, out *v1alpha4.NnfSystemStorageList, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfSystemStorageList_To_v1alpha4_NnfSystemStorageList(in, out, s) } -func autoConvert_v1alpha3_NnfSystemStorageList_To_v1alpha2_NnfSystemStorageList(in *v1alpha3.NnfSystemStorageList, out *NnfSystemStorageList, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfSystemStorageList_To_v1alpha2_NnfSystemStorageList(in *v1alpha4.NnfSystemStorageList, out *NnfSystemStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]NnfSystemStorage)(unsafe.Pointer(&in.Items)) return nil } -// Convert_v1alpha3_NnfSystemStorageList_To_v1alpha2_NnfSystemStorageList is an autogenerated conversion function. -func Convert_v1alpha3_NnfSystemStorageList_To_v1alpha2_NnfSystemStorageList(in *v1alpha3.NnfSystemStorageList, out *NnfSystemStorageList, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfSystemStorageList_To_v1alpha2_NnfSystemStorageList(in, out, s) +// Convert_v1alpha4_NnfSystemStorageList_To_v1alpha2_NnfSystemStorageList is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfSystemStorageList_To_v1alpha2_NnfSystemStorageList(in *v1alpha4.NnfSystemStorageList, out *NnfSystemStorageList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfSystemStorageList_To_v1alpha2_NnfSystemStorageList(in, out, s) } -func autoConvert_v1alpha2_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec(in *NnfSystemStorageSpec, out *v1alpha3.NnfSystemStorageSpec, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfSystemStorageSpec_To_v1alpha4_NnfSystemStorageSpec(in *NnfSystemStorageSpec, out *v1alpha4.NnfSystemStorageSpec, s conversion.Scope) error { out.SystemConfiguration = in.SystemConfiguration out.ExcludeRabbits = *(*[]string)(unsafe.Pointer(&in.ExcludeRabbits)) out.IncludeRabbits = *(*[]string)(unsafe.Pointer(&in.IncludeRabbits)) out.ExcludeDisabledRabbits = in.ExcludeDisabledRabbits out.ExcludeComputes = *(*[]string)(unsafe.Pointer(&in.ExcludeComputes)) out.IncludeComputes = *(*[]string)(unsafe.Pointer(&in.IncludeComputes)) - out.ComputesTarget = v1alpha3.NnfSystemStorageComputesTarget(in.ComputesTarget) + out.ComputesTarget = v1alpha4.NnfSystemStorageComputesTarget(in.ComputesTarget) out.ComputesPattern = *(*[]int)(unsafe.Pointer(&in.ComputesPattern)) out.Capacity = in.Capacity out.Type = in.Type @@ -3163,12 +3163,12 @@ func autoConvert_v1alpha2_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec( return nil } -// Convert_v1alpha2_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec is an autogenerated conversion function. -func Convert_v1alpha2_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec(in *NnfSystemStorageSpec, out *v1alpha3.NnfSystemStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec(in, out, s) +// Convert_v1alpha2_NnfSystemStorageSpec_To_v1alpha4_NnfSystemStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfSystemStorageSpec_To_v1alpha4_NnfSystemStorageSpec(in *NnfSystemStorageSpec, out *v1alpha4.NnfSystemStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfSystemStorageSpec_To_v1alpha4_NnfSystemStorageSpec(in, out, s) } -func autoConvert_v1alpha3_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec(in *v1alpha3.NnfSystemStorageSpec, out *NnfSystemStorageSpec, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec(in *v1alpha4.NnfSystemStorageSpec, out *NnfSystemStorageSpec, s conversion.Scope) error { out.SystemConfiguration = in.SystemConfiguration out.ExcludeRabbits = *(*[]string)(unsafe.Pointer(&in.ExcludeRabbits)) out.IncludeRabbits = *(*[]string)(unsafe.Pointer(&in.IncludeRabbits)) @@ -3185,29 +3185,29 @@ func autoConvert_v1alpha3_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec( return nil } -// Convert_v1alpha3_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec is an autogenerated conversion function. -func Convert_v1alpha3_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec(in *v1alpha3.NnfSystemStorageSpec, out *NnfSystemStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec(in, out, s) +// Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec(in *v1alpha4.NnfSystemStorageSpec, out *NnfSystemStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec(in, out, s) } -func autoConvert_v1alpha2_NnfSystemStorageStatus_To_v1alpha3_NnfSystemStorageStatus(in *NnfSystemStorageStatus, out *v1alpha3.NnfSystemStorageStatus, s conversion.Scope) error { +func autoConvert_v1alpha2_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus(in *NnfSystemStorageStatus, out *v1alpha4.NnfSystemStorageStatus, s conversion.Scope) error { out.Ready = in.Ready out.ResourceError = in.ResourceError return nil } -// Convert_v1alpha2_NnfSystemStorageStatus_To_v1alpha3_NnfSystemStorageStatus is an autogenerated conversion function. -func Convert_v1alpha2_NnfSystemStorageStatus_To_v1alpha3_NnfSystemStorageStatus(in *NnfSystemStorageStatus, out *v1alpha3.NnfSystemStorageStatus, s conversion.Scope) error { - return autoConvert_v1alpha2_NnfSystemStorageStatus_To_v1alpha3_NnfSystemStorageStatus(in, out, s) +// Convert_v1alpha2_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus is an autogenerated conversion function. 
+func Convert_v1alpha2_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus(in *NnfSystemStorageStatus, out *v1alpha4.NnfSystemStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus(in, out, s) } -func autoConvert_v1alpha3_NnfSystemStorageStatus_To_v1alpha2_NnfSystemStorageStatus(in *v1alpha3.NnfSystemStorageStatus, out *NnfSystemStorageStatus, s conversion.Scope) error { +func autoConvert_v1alpha4_NnfSystemStorageStatus_To_v1alpha2_NnfSystemStorageStatus(in *v1alpha4.NnfSystemStorageStatus, out *NnfSystemStorageStatus, s conversion.Scope) error { out.Ready = in.Ready out.ResourceError = in.ResourceError return nil } -// Convert_v1alpha3_NnfSystemStorageStatus_To_v1alpha2_NnfSystemStorageStatus is an autogenerated conversion function. -func Convert_v1alpha3_NnfSystemStorageStatus_To_v1alpha2_NnfSystemStorageStatus(in *v1alpha3.NnfSystemStorageStatus, out *NnfSystemStorageStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfSystemStorageStatus_To_v1alpha2_NnfSystemStorageStatus(in, out, s) +// Convert_v1alpha4_NnfSystemStorageStatus_To_v1alpha2_NnfSystemStorageStatus is an autogenerated conversion function. +func Convert_v1alpha4_NnfSystemStorageStatus_To_v1alpha2_NnfSystemStorageStatus(in *v1alpha4.NnfSystemStorageStatus, out *NnfSystemStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfSystemStorageStatus_To_v1alpha2_NnfSystemStorageStatus(in, out, s) } diff --git a/api/v1alpha3/zz_generated.conversion.go b/api/v1alpha3/zz_generated.conversion.go new file mode 100644 index 00000000..1f6d905f --- /dev/null +++ b/api/v1alpha3/zz_generated.conversion.go @@ -0,0 +1,3207 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. 
+ * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + unsafe "unsafe" + + v1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + v1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" + v2beta1 "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*LustreStorageSpec)(nil), (*v1alpha4.LustreStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_LustreStorageSpec_To_v1alpha4_LustreStorageSpec(a.(*LustreStorageSpec), b.(*v1alpha4.LustreStorageSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.LustreStorageSpec)(nil), (*LustreStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_LustreStorageSpec_To_v1alpha3_LustreStorageSpec(a.(*v1alpha4.LustreStorageSpec), b.(*LustreStorageSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfAccess)(nil), (*v1alpha4.NnfAccess)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfAccess_To_v1alpha4_NnfAccess(a.(*NnfAccess), b.(*v1alpha4.NnfAccess), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfAccess)(nil), (*NnfAccess)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfAccess_To_v1alpha3_NnfAccess(a.(*v1alpha4.NnfAccess), b.(*NnfAccess), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfAccessList)(nil), (*v1alpha4.NnfAccessList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfAccessList_To_v1alpha4_NnfAccessList(a.(*NnfAccessList), b.(*v1alpha4.NnfAccessList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfAccessList)(nil), (*NnfAccessList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfAccessList_To_v1alpha3_NnfAccessList(a.(*v1alpha4.NnfAccessList), b.(*NnfAccessList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfAccessSpec)(nil), 
(*v1alpha4.NnfAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfAccessSpec_To_v1alpha4_NnfAccessSpec(a.(*NnfAccessSpec), b.(*v1alpha4.NnfAccessSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfAccessSpec)(nil), (*NnfAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(a.(*v1alpha4.NnfAccessSpec), b.(*NnfAccessSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfAccessStatus)(nil), (*v1alpha4.NnfAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfAccessStatus_To_v1alpha4_NnfAccessStatus(a.(*NnfAccessStatus), b.(*v1alpha4.NnfAccessStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfAccessStatus)(nil), (*NnfAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfAccessStatus_To_v1alpha3_NnfAccessStatus(a.(*v1alpha4.NnfAccessStatus), b.(*NnfAccessStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfContainerProfile)(nil), (*v1alpha4.NnfContainerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfContainerProfile_To_v1alpha4_NnfContainerProfile(a.(*NnfContainerProfile), b.(*v1alpha4.NnfContainerProfile), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfContainerProfile)(nil), (*NnfContainerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfContainerProfile_To_v1alpha3_NnfContainerProfile(a.(*v1alpha4.NnfContainerProfile), b.(*NnfContainerProfile), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfContainerProfileData)(nil), 
(*v1alpha4.NnfContainerProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfContainerProfileData_To_v1alpha4_NnfContainerProfileData(a.(*NnfContainerProfileData), b.(*v1alpha4.NnfContainerProfileData), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfContainerProfileData)(nil), (*NnfContainerProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfContainerProfileData_To_v1alpha3_NnfContainerProfileData(a.(*v1alpha4.NnfContainerProfileData), b.(*NnfContainerProfileData), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfContainerProfileList)(nil), (*v1alpha4.NnfContainerProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfContainerProfileList_To_v1alpha4_NnfContainerProfileList(a.(*NnfContainerProfileList), b.(*v1alpha4.NnfContainerProfileList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfContainerProfileList)(nil), (*NnfContainerProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfContainerProfileList_To_v1alpha3_NnfContainerProfileList(a.(*v1alpha4.NnfContainerProfileList), b.(*NnfContainerProfileList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfContainerProfileStorage)(nil), (*v1alpha4.NnfContainerProfileStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfContainerProfileStorage_To_v1alpha4_NnfContainerProfileStorage(a.(*NnfContainerProfileStorage), b.(*v1alpha4.NnfContainerProfileStorage), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfContainerProfileStorage)(nil), (*NnfContainerProfileStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha4_NnfContainerProfileStorage_To_v1alpha3_NnfContainerProfileStorage(a.(*v1alpha4.NnfContainerProfileStorage), b.(*NnfContainerProfileStorage), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfDataMovement)(nil), (*v1alpha4.NnfDataMovement)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfDataMovement_To_v1alpha4_NnfDataMovement(a.(*NnfDataMovement), b.(*v1alpha4.NnfDataMovement), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovement)(nil), (*NnfDataMovement)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovement_To_v1alpha3_NnfDataMovement(a.(*v1alpha4.NnfDataMovement), b.(*NnfDataMovement), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfDataMovementCommandStatus)(nil), (*v1alpha4.NnfDataMovementCommandStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfDataMovementCommandStatus_To_v1alpha4_NnfDataMovementCommandStatus(a.(*NnfDataMovementCommandStatus), b.(*v1alpha4.NnfDataMovementCommandStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementCommandStatus)(nil), (*NnfDataMovementCommandStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementCommandStatus_To_v1alpha3_NnfDataMovementCommandStatus(a.(*v1alpha4.NnfDataMovementCommandStatus), b.(*NnfDataMovementCommandStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfDataMovementConfig)(nil), (*v1alpha4.NnfDataMovementConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfDataMovementConfig_To_v1alpha4_NnfDataMovementConfig(a.(*NnfDataMovementConfig), b.(*v1alpha4.NnfDataMovementConfig), scope) + }); err != nil { + return err + } + 
if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementConfig)(nil), (*NnfDataMovementConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementConfig_To_v1alpha3_NnfDataMovementConfig(a.(*v1alpha4.NnfDataMovementConfig), b.(*NnfDataMovementConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfDataMovementList)(nil), (*v1alpha4.NnfDataMovementList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfDataMovementList_To_v1alpha4_NnfDataMovementList(a.(*NnfDataMovementList), b.(*v1alpha4.NnfDataMovementList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementList)(nil), (*NnfDataMovementList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementList_To_v1alpha3_NnfDataMovementList(a.(*v1alpha4.NnfDataMovementList), b.(*NnfDataMovementList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfDataMovementManager)(nil), (*v1alpha4.NnfDataMovementManager)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfDataMovementManager_To_v1alpha4_NnfDataMovementManager(a.(*NnfDataMovementManager), b.(*v1alpha4.NnfDataMovementManager), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementManager)(nil), (*NnfDataMovementManager)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementManager_To_v1alpha3_NnfDataMovementManager(a.(*v1alpha4.NnfDataMovementManager), b.(*NnfDataMovementManager), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfDataMovementManagerList)(nil), (*v1alpha4.NnfDataMovementManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha3_NnfDataMovementManagerList_To_v1alpha4_NnfDataMovementManagerList(a.(*NnfDataMovementManagerList), b.(*v1alpha4.NnfDataMovementManagerList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementManagerList)(nil), (*NnfDataMovementManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementManagerList_To_v1alpha3_NnfDataMovementManagerList(a.(*v1alpha4.NnfDataMovementManagerList), b.(*NnfDataMovementManagerList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfDataMovementManagerSpec)(nil), (*v1alpha4.NnfDataMovementManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfDataMovementManagerSpec_To_v1alpha4_NnfDataMovementManagerSpec(a.(*NnfDataMovementManagerSpec), b.(*v1alpha4.NnfDataMovementManagerSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementManagerSpec)(nil), (*NnfDataMovementManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementManagerSpec_To_v1alpha3_NnfDataMovementManagerSpec(a.(*v1alpha4.NnfDataMovementManagerSpec), b.(*NnfDataMovementManagerSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfDataMovementManagerStatus)(nil), (*v1alpha4.NnfDataMovementManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfDataMovementManagerStatus_To_v1alpha4_NnfDataMovementManagerStatus(a.(*NnfDataMovementManagerStatus), b.(*v1alpha4.NnfDataMovementManagerStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementManagerStatus)(nil), (*NnfDataMovementManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha4_NnfDataMovementManagerStatus_To_v1alpha3_NnfDataMovementManagerStatus(a.(*v1alpha4.NnfDataMovementManagerStatus), b.(*NnfDataMovementManagerStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfDataMovementProfile)(nil), (*v1alpha4.NnfDataMovementProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfDataMovementProfile_To_v1alpha4_NnfDataMovementProfile(a.(*NnfDataMovementProfile), b.(*v1alpha4.NnfDataMovementProfile), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementProfile)(nil), (*NnfDataMovementProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementProfile_To_v1alpha3_NnfDataMovementProfile(a.(*v1alpha4.NnfDataMovementProfile), b.(*NnfDataMovementProfile), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfDataMovementProfileData)(nil), (*v1alpha4.NnfDataMovementProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfDataMovementProfileData_To_v1alpha4_NnfDataMovementProfileData(a.(*NnfDataMovementProfileData), b.(*v1alpha4.NnfDataMovementProfileData), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementProfileData)(nil), (*NnfDataMovementProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData(a.(*v1alpha4.NnfDataMovementProfileData), b.(*NnfDataMovementProfileData), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfDataMovementProfileList)(nil), (*v1alpha4.NnfDataMovementProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha3_NnfDataMovementProfileList_To_v1alpha4_NnfDataMovementProfileList(a.(*NnfDataMovementProfileList), b.(*v1alpha4.NnfDataMovementProfileList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementProfileList)(nil), (*NnfDataMovementProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementProfileList_To_v1alpha3_NnfDataMovementProfileList(a.(*v1alpha4.NnfDataMovementProfileList), b.(*NnfDataMovementProfileList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfDataMovementSpec)(nil), (*v1alpha4.NnfDataMovementSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfDataMovementSpec_To_v1alpha4_NnfDataMovementSpec(a.(*NnfDataMovementSpec), b.(*v1alpha4.NnfDataMovementSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementSpec)(nil), (*NnfDataMovementSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementSpec_To_v1alpha3_NnfDataMovementSpec(a.(*v1alpha4.NnfDataMovementSpec), b.(*NnfDataMovementSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfDataMovementSpecSourceDestination)(nil), (*v1alpha4.NnfDataMovementSpecSourceDestination)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfDataMovementSpecSourceDestination_To_v1alpha4_NnfDataMovementSpecSourceDestination(a.(*NnfDataMovementSpecSourceDestination), b.(*v1alpha4.NnfDataMovementSpecSourceDestination), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementSpecSourceDestination)(nil), (*NnfDataMovementSpecSourceDestination)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha4_NnfDataMovementSpecSourceDestination_To_v1alpha3_NnfDataMovementSpecSourceDestination(a.(*v1alpha4.NnfDataMovementSpecSourceDestination), b.(*NnfDataMovementSpecSourceDestination), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfDataMovementStatus)(nil), (*v1alpha4.NnfDataMovementStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfDataMovementStatus_To_v1alpha4_NnfDataMovementStatus(a.(*NnfDataMovementStatus), b.(*v1alpha4.NnfDataMovementStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementStatus)(nil), (*NnfDataMovementStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementStatus_To_v1alpha3_NnfDataMovementStatus(a.(*v1alpha4.NnfDataMovementStatus), b.(*NnfDataMovementStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfDriveStatus)(nil), (*v1alpha4.NnfDriveStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfDriveStatus_To_v1alpha4_NnfDriveStatus(a.(*NnfDriveStatus), b.(*v1alpha4.NnfDriveStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDriveStatus)(nil), (*NnfDriveStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDriveStatus_To_v1alpha3_NnfDriveStatus(a.(*v1alpha4.NnfDriveStatus), b.(*NnfDriveStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfLustreMGT)(nil), (*v1alpha4.NnfLustreMGT)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfLustreMGT_To_v1alpha4_NnfLustreMGT(a.(*NnfLustreMGT), b.(*v1alpha4.NnfLustreMGT), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfLustreMGT)(nil), (*NnfLustreMGT)(nil), func(a, b 
interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfLustreMGT_To_v1alpha3_NnfLustreMGT(a.(*v1alpha4.NnfLustreMGT), b.(*NnfLustreMGT), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfLustreMGTList)(nil), (*v1alpha4.NnfLustreMGTList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfLustreMGTList_To_v1alpha4_NnfLustreMGTList(a.(*NnfLustreMGTList), b.(*v1alpha4.NnfLustreMGTList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfLustreMGTList)(nil), (*NnfLustreMGTList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfLustreMGTList_To_v1alpha3_NnfLustreMGTList(a.(*v1alpha4.NnfLustreMGTList), b.(*NnfLustreMGTList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfLustreMGTSpec)(nil), (*v1alpha4.NnfLustreMGTSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfLustreMGTSpec_To_v1alpha4_NnfLustreMGTSpec(a.(*NnfLustreMGTSpec), b.(*v1alpha4.NnfLustreMGTSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfLustreMGTSpec)(nil), (*NnfLustreMGTSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfLustreMGTSpec_To_v1alpha3_NnfLustreMGTSpec(a.(*v1alpha4.NnfLustreMGTSpec), b.(*NnfLustreMGTSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfLustreMGTStatus)(nil), (*v1alpha4.NnfLustreMGTStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfLustreMGTStatus_To_v1alpha4_NnfLustreMGTStatus(a.(*NnfLustreMGTStatus), b.(*v1alpha4.NnfLustreMGTStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfLustreMGTStatus)(nil), (*NnfLustreMGTStatus)(nil), func(a, b interface{}, scope conversion.Scope) 
error { + return Convert_v1alpha4_NnfLustreMGTStatus_To_v1alpha3_NnfLustreMGTStatus(a.(*v1alpha4.NnfLustreMGTStatus), b.(*NnfLustreMGTStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfLustreMGTStatusClaim)(nil), (*v1alpha4.NnfLustreMGTStatusClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfLustreMGTStatusClaim_To_v1alpha4_NnfLustreMGTStatusClaim(a.(*NnfLustreMGTStatusClaim), b.(*v1alpha4.NnfLustreMGTStatusClaim), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfLustreMGTStatusClaim)(nil), (*NnfLustreMGTStatusClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfLustreMGTStatusClaim_To_v1alpha3_NnfLustreMGTStatusClaim(a.(*v1alpha4.NnfLustreMGTStatusClaim), b.(*NnfLustreMGTStatusClaim), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfNode)(nil), (*v1alpha4.NnfNode)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfNode_To_v1alpha4_NnfNode(a.(*NnfNode), b.(*v1alpha4.NnfNode), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNode)(nil), (*NnfNode)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNode_To_v1alpha3_NnfNode(a.(*v1alpha4.NnfNode), b.(*NnfNode), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorage)(nil), (*v1alpha4.NnfNodeBlockStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfNodeBlockStorage_To_v1alpha4_NnfNodeBlockStorage(a.(*NnfNodeBlockStorage), b.(*v1alpha4.NnfNodeBlockStorage), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorage)(nil), (*NnfNodeBlockStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha4_NnfNodeBlockStorage_To_v1alpha3_NnfNodeBlockStorage(a.(*v1alpha4.NnfNodeBlockStorage), b.(*NnfNodeBlockStorage), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageAccessStatus)(nil), (*v1alpha4.NnfNodeBlockStorageAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfNodeBlockStorageAccessStatus_To_v1alpha4_NnfNodeBlockStorageAccessStatus(a.(*NnfNodeBlockStorageAccessStatus), b.(*v1alpha4.NnfNodeBlockStorageAccessStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageAccessStatus)(nil), (*NnfNodeBlockStorageAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeBlockStorageAccessStatus_To_v1alpha3_NnfNodeBlockStorageAccessStatus(a.(*v1alpha4.NnfNodeBlockStorageAccessStatus), b.(*NnfNodeBlockStorageAccessStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageAllocationSpec)(nil), (*v1alpha4.NnfNodeBlockStorageAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfNodeBlockStorageAllocationSpec_To_v1alpha4_NnfNodeBlockStorageAllocationSpec(a.(*NnfNodeBlockStorageAllocationSpec), b.(*v1alpha4.NnfNodeBlockStorageAllocationSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageAllocationSpec)(nil), (*NnfNodeBlockStorageAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeBlockStorageAllocationSpec_To_v1alpha3_NnfNodeBlockStorageAllocationSpec(a.(*v1alpha4.NnfNodeBlockStorageAllocationSpec), b.(*NnfNodeBlockStorageAllocationSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageAllocationStatus)(nil), 
(*v1alpha4.NnfNodeBlockStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfNodeBlockStorageAllocationStatus_To_v1alpha4_NnfNodeBlockStorageAllocationStatus(a.(*NnfNodeBlockStorageAllocationStatus), b.(*v1alpha4.NnfNodeBlockStorageAllocationStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageAllocationStatus)(nil), (*NnfNodeBlockStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeBlockStorageAllocationStatus_To_v1alpha3_NnfNodeBlockStorageAllocationStatus(a.(*v1alpha4.NnfNodeBlockStorageAllocationStatus), b.(*NnfNodeBlockStorageAllocationStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageDeviceStatus)(nil), (*v1alpha4.NnfNodeBlockStorageDeviceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfNodeBlockStorageDeviceStatus_To_v1alpha4_NnfNodeBlockStorageDeviceStatus(a.(*NnfNodeBlockStorageDeviceStatus), b.(*v1alpha4.NnfNodeBlockStorageDeviceStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageDeviceStatus)(nil), (*NnfNodeBlockStorageDeviceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeBlockStorageDeviceStatus_To_v1alpha3_NnfNodeBlockStorageDeviceStatus(a.(*v1alpha4.NnfNodeBlockStorageDeviceStatus), b.(*NnfNodeBlockStorageDeviceStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageList)(nil), (*v1alpha4.NnfNodeBlockStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfNodeBlockStorageList_To_v1alpha4_NnfNodeBlockStorageList(a.(*NnfNodeBlockStorageList), b.(*v1alpha4.NnfNodeBlockStorageList), scope) + }); err != nil { + return err + } 
+ if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageList)(nil), (*NnfNodeBlockStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeBlockStorageList_To_v1alpha3_NnfNodeBlockStorageList(a.(*v1alpha4.NnfNodeBlockStorageList), b.(*NnfNodeBlockStorageList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageSpec)(nil), (*v1alpha4.NnfNodeBlockStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfNodeBlockStorageSpec_To_v1alpha4_NnfNodeBlockStorageSpec(a.(*NnfNodeBlockStorageSpec), b.(*v1alpha4.NnfNodeBlockStorageSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageSpec)(nil), (*NnfNodeBlockStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeBlockStorageSpec_To_v1alpha3_NnfNodeBlockStorageSpec(a.(*v1alpha4.NnfNodeBlockStorageSpec), b.(*NnfNodeBlockStorageSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageStatus)(nil), (*v1alpha4.NnfNodeBlockStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfNodeBlockStorageStatus_To_v1alpha4_NnfNodeBlockStorageStatus(a.(*NnfNodeBlockStorageStatus), b.(*v1alpha4.NnfNodeBlockStorageStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageStatus)(nil), (*NnfNodeBlockStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeBlockStorageStatus_To_v1alpha3_NnfNodeBlockStorageStatus(a.(*v1alpha4.NnfNodeBlockStorageStatus), b.(*NnfNodeBlockStorageStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfNodeECData)(nil), (*v1alpha4.NnfNodeECData)(nil), func(a, b interface{}, scope 
conversion.Scope) error { + return Convert_v1alpha3_NnfNodeECData_To_v1alpha4_NnfNodeECData(a.(*NnfNodeECData), b.(*v1alpha4.NnfNodeECData), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeECData)(nil), (*NnfNodeECData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeECData_To_v1alpha3_NnfNodeECData(a.(*v1alpha4.NnfNodeECData), b.(*NnfNodeECData), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfNodeECDataList)(nil), (*v1alpha4.NnfNodeECDataList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfNodeECDataList_To_v1alpha4_NnfNodeECDataList(a.(*NnfNodeECDataList), b.(*v1alpha4.NnfNodeECDataList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeECDataList)(nil), (*NnfNodeECDataList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeECDataList_To_v1alpha3_NnfNodeECDataList(a.(*v1alpha4.NnfNodeECDataList), b.(*NnfNodeECDataList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfNodeECDataSpec)(nil), (*v1alpha4.NnfNodeECDataSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfNodeECDataSpec_To_v1alpha4_NnfNodeECDataSpec(a.(*NnfNodeECDataSpec), b.(*v1alpha4.NnfNodeECDataSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeECDataSpec)(nil), (*NnfNodeECDataSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeECDataSpec_To_v1alpha3_NnfNodeECDataSpec(a.(*v1alpha4.NnfNodeECDataSpec), b.(*NnfNodeECDataSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfNodeECDataStatus)(nil), (*v1alpha4.NnfNodeECDataStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha3_NnfNodeECDataStatus_To_v1alpha4_NnfNodeECDataStatus(a.(*NnfNodeECDataStatus), b.(*v1alpha4.NnfNodeECDataStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeECDataStatus)(nil), (*NnfNodeECDataStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeECDataStatus_To_v1alpha3_NnfNodeECDataStatus(a.(*v1alpha4.NnfNodeECDataStatus), b.(*NnfNodeECDataStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfNodeList)(nil), (*v1alpha4.NnfNodeList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfNodeList_To_v1alpha4_NnfNodeList(a.(*NnfNodeList), b.(*v1alpha4.NnfNodeList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeList)(nil), (*NnfNodeList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeList_To_v1alpha3_NnfNodeList(a.(*v1alpha4.NnfNodeList), b.(*NnfNodeList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfNodeSpec)(nil), (*v1alpha4.NnfNodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfNodeSpec_To_v1alpha4_NnfNodeSpec(a.(*NnfNodeSpec), b.(*v1alpha4.NnfNodeSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeSpec)(nil), (*NnfNodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeSpec_To_v1alpha3_NnfNodeSpec(a.(*v1alpha4.NnfNodeSpec), b.(*NnfNodeSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfNodeStatus)(nil), (*v1alpha4.NnfNodeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfNodeStatus_To_v1alpha4_NnfNodeStatus(a.(*NnfNodeStatus), b.(*v1alpha4.NnfNodeStatus), scope) + }); err != nil { + 
return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeStatus)(nil), (*NnfNodeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeStatus_To_v1alpha3_NnfNodeStatus(a.(*v1alpha4.NnfNodeStatus), b.(*NnfNodeStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfNodeStorage)(nil), (*v1alpha4.NnfNodeStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfNodeStorage_To_v1alpha4_NnfNodeStorage(a.(*NnfNodeStorage), b.(*v1alpha4.NnfNodeStorage), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeStorage)(nil), (*NnfNodeStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeStorage_To_v1alpha3_NnfNodeStorage(a.(*v1alpha4.NnfNodeStorage), b.(*NnfNodeStorage), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfNodeStorageAllocationStatus)(nil), (*v1alpha4.NnfNodeStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfNodeStorageAllocationStatus_To_v1alpha4_NnfNodeStorageAllocationStatus(a.(*NnfNodeStorageAllocationStatus), b.(*v1alpha4.NnfNodeStorageAllocationStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeStorageAllocationStatus)(nil), (*NnfNodeStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeStorageAllocationStatus_To_v1alpha3_NnfNodeStorageAllocationStatus(a.(*v1alpha4.NnfNodeStorageAllocationStatus), b.(*NnfNodeStorageAllocationStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfNodeStorageList)(nil), (*v1alpha4.NnfNodeStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha3_NnfNodeStorageList_To_v1alpha4_NnfNodeStorageList(a.(*NnfNodeStorageList), b.(*v1alpha4.NnfNodeStorageList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeStorageList)(nil), (*NnfNodeStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeStorageList_To_v1alpha3_NnfNodeStorageList(a.(*v1alpha4.NnfNodeStorageList), b.(*NnfNodeStorageList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfNodeStorageSpec)(nil), (*v1alpha4.NnfNodeStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfNodeStorageSpec_To_v1alpha4_NnfNodeStorageSpec(a.(*NnfNodeStorageSpec), b.(*v1alpha4.NnfNodeStorageSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeStorageSpec)(nil), (*NnfNodeStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeStorageSpec_To_v1alpha3_NnfNodeStorageSpec(a.(*v1alpha4.NnfNodeStorageSpec), b.(*NnfNodeStorageSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfNodeStorageStatus)(nil), (*v1alpha4.NnfNodeStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfNodeStorageStatus_To_v1alpha4_NnfNodeStorageStatus(a.(*NnfNodeStorageStatus), b.(*v1alpha4.NnfNodeStorageStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeStorageStatus)(nil), (*NnfNodeStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfNodeStorageStatus_To_v1alpha3_NnfNodeStorageStatus(a.(*v1alpha4.NnfNodeStorageStatus), b.(*NnfNodeStorageStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfPortManager)(nil), (*v1alpha4.NnfPortManager)(nil), func(a, b 
interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfPortManager_To_v1alpha4_NnfPortManager(a.(*NnfPortManager), b.(*v1alpha4.NnfPortManager), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfPortManager)(nil), (*NnfPortManager)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfPortManager_To_v1alpha3_NnfPortManager(a.(*v1alpha4.NnfPortManager), b.(*NnfPortManager), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfPortManagerAllocationSpec)(nil), (*v1alpha4.NnfPortManagerAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfPortManagerAllocationSpec_To_v1alpha4_NnfPortManagerAllocationSpec(a.(*NnfPortManagerAllocationSpec), b.(*v1alpha4.NnfPortManagerAllocationSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfPortManagerAllocationSpec)(nil), (*NnfPortManagerAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfPortManagerAllocationSpec_To_v1alpha3_NnfPortManagerAllocationSpec(a.(*v1alpha4.NnfPortManagerAllocationSpec), b.(*NnfPortManagerAllocationSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfPortManagerAllocationStatus)(nil), (*v1alpha4.NnfPortManagerAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfPortManagerAllocationStatus_To_v1alpha4_NnfPortManagerAllocationStatus(a.(*NnfPortManagerAllocationStatus), b.(*v1alpha4.NnfPortManagerAllocationStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfPortManagerAllocationStatus)(nil), (*NnfPortManagerAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha4_NnfPortManagerAllocationStatus_To_v1alpha3_NnfPortManagerAllocationStatus(a.(*v1alpha4.NnfPortManagerAllocationStatus), b.(*NnfPortManagerAllocationStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfPortManagerList)(nil), (*v1alpha4.NnfPortManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfPortManagerList_To_v1alpha4_NnfPortManagerList(a.(*NnfPortManagerList), b.(*v1alpha4.NnfPortManagerList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfPortManagerList)(nil), (*NnfPortManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfPortManagerList_To_v1alpha3_NnfPortManagerList(a.(*v1alpha4.NnfPortManagerList), b.(*NnfPortManagerList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfPortManagerSpec)(nil), (*v1alpha4.NnfPortManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfPortManagerSpec_To_v1alpha4_NnfPortManagerSpec(a.(*NnfPortManagerSpec), b.(*v1alpha4.NnfPortManagerSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfPortManagerSpec)(nil), (*NnfPortManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfPortManagerSpec_To_v1alpha3_NnfPortManagerSpec(a.(*v1alpha4.NnfPortManagerSpec), b.(*NnfPortManagerSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfPortManagerStatus)(nil), (*v1alpha4.NnfPortManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfPortManagerStatus_To_v1alpha4_NnfPortManagerStatus(a.(*NnfPortManagerStatus), b.(*v1alpha4.NnfPortManagerStatus), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*v1alpha4.NnfPortManagerStatus)(nil), (*NnfPortManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfPortManagerStatus_To_v1alpha3_NnfPortManagerStatus(a.(*v1alpha4.NnfPortManagerStatus), b.(*NnfPortManagerStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfResourceStatus)(nil), (*v1alpha4.NnfResourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfResourceStatus_To_v1alpha4_NnfResourceStatus(a.(*NnfResourceStatus), b.(*v1alpha4.NnfResourceStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfResourceStatus)(nil), (*NnfResourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfResourceStatus_To_v1alpha3_NnfResourceStatus(a.(*v1alpha4.NnfResourceStatus), b.(*NnfResourceStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfServerStatus)(nil), (*v1alpha4.NnfServerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfServerStatus_To_v1alpha4_NnfServerStatus(a.(*NnfServerStatus), b.(*v1alpha4.NnfServerStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfServerStatus)(nil), (*NnfServerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfServerStatus_To_v1alpha3_NnfServerStatus(a.(*v1alpha4.NnfServerStatus), b.(*NnfServerStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfStorage)(nil), (*v1alpha4.NnfStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfStorage_To_v1alpha4_NnfStorage(a.(*NnfStorage), b.(*v1alpha4.NnfStorage), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*v1alpha4.NnfStorage)(nil), (*NnfStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorage_To_v1alpha3_NnfStorage(a.(*v1alpha4.NnfStorage), b.(*NnfStorage), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfStorageAllocationNodes)(nil), (*v1alpha4.NnfStorageAllocationNodes)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfStorageAllocationNodes_To_v1alpha4_NnfStorageAllocationNodes(a.(*NnfStorageAllocationNodes), b.(*v1alpha4.NnfStorageAllocationNodes), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageAllocationNodes)(nil), (*NnfStorageAllocationNodes)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageAllocationNodes_To_v1alpha3_NnfStorageAllocationNodes(a.(*v1alpha4.NnfStorageAllocationNodes), b.(*NnfStorageAllocationNodes), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfStorageAllocationSetSpec)(nil), (*v1alpha4.NnfStorageAllocationSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfStorageAllocationSetSpec_To_v1alpha4_NnfStorageAllocationSetSpec(a.(*NnfStorageAllocationSetSpec), b.(*v1alpha4.NnfStorageAllocationSetSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageAllocationSetSpec)(nil), (*NnfStorageAllocationSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageAllocationSetSpec_To_v1alpha3_NnfStorageAllocationSetSpec(a.(*v1alpha4.NnfStorageAllocationSetSpec), b.(*NnfStorageAllocationSetSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfStorageAllocationSetStatus)(nil), (*v1alpha4.NnfStorageAllocationSetStatus)(nil), func(a, b interface{}, scope 
conversion.Scope) error { + return Convert_v1alpha3_NnfStorageAllocationSetStatus_To_v1alpha4_NnfStorageAllocationSetStatus(a.(*NnfStorageAllocationSetStatus), b.(*v1alpha4.NnfStorageAllocationSetStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageAllocationSetStatus)(nil), (*NnfStorageAllocationSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageAllocationSetStatus_To_v1alpha3_NnfStorageAllocationSetStatus(a.(*v1alpha4.NnfStorageAllocationSetStatus), b.(*NnfStorageAllocationSetStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfStorageList)(nil), (*v1alpha4.NnfStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfStorageList_To_v1alpha4_NnfStorageList(a.(*NnfStorageList), b.(*v1alpha4.NnfStorageList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageList)(nil), (*NnfStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageList_To_v1alpha3_NnfStorageList(a.(*v1alpha4.NnfStorageList), b.(*NnfStorageList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfStorageLustreSpec)(nil), (*v1alpha4.NnfStorageLustreSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfStorageLustreSpec_To_v1alpha4_NnfStorageLustreSpec(a.(*NnfStorageLustreSpec), b.(*v1alpha4.NnfStorageLustreSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageLustreSpec)(nil), (*NnfStorageLustreSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageLustreSpec_To_v1alpha3_NnfStorageLustreSpec(a.(*v1alpha4.NnfStorageLustreSpec), b.(*NnfStorageLustreSpec), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*NnfStorageLustreStatus)(nil), (*v1alpha4.NnfStorageLustreStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfStorageLustreStatus_To_v1alpha4_NnfStorageLustreStatus(a.(*NnfStorageLustreStatus), b.(*v1alpha4.NnfStorageLustreStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageLustreStatus)(nil), (*NnfStorageLustreStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreStatus(a.(*v1alpha4.NnfStorageLustreStatus), b.(*NnfStorageLustreStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfStorageProfile)(nil), (*v1alpha4.NnfStorageProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(a.(*NnfStorageProfile), b.(*v1alpha4.NnfStorageProfile), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfile)(nil), (*NnfStorageProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfile_To_v1alpha3_NnfStorageProfile(a.(*v1alpha4.NnfStorageProfile), b.(*NnfStorageProfile), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileCmdLines)(nil), (*v1alpha4.NnfStorageProfileCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(a.(*NnfStorageProfileCmdLines), b.(*v1alpha4.NnfStorageProfileCmdLines), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileCmdLines)(nil), (*NnfStorageProfileCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(a.(*v1alpha4.NnfStorageProfileCmdLines), b.(*NnfStorageProfileCmdLines), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileData)(nil), (*v1alpha4.NnfStorageProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData(a.(*NnfStorageProfileData), b.(*v1alpha4.NnfStorageProfileData), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileData)(nil), (*NnfStorageProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileData_To_v1alpha3_NnfStorageProfileData(a.(*v1alpha4.NnfStorageProfileData), b.(*NnfStorageProfileData), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileGFS2Data)(nil), (*v1alpha4.NnfStorageProfileGFS2Data)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfStorageProfileGFS2Data_To_v1alpha4_NnfStorageProfileGFS2Data(a.(*NnfStorageProfileGFS2Data), b.(*v1alpha4.NnfStorageProfileGFS2Data), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileGFS2Data)(nil), (*NnfStorageProfileGFS2Data)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileGFS2Data_To_v1alpha3_NnfStorageProfileGFS2Data(a.(*v1alpha4.NnfStorageProfileGFS2Data), b.(*NnfStorageProfileGFS2Data), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLVMLvChangeCmdLines)(nil), (*v1alpha4.NnfStorageProfileLVMLvChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines(a.(*NnfStorageProfileLVMLvChangeCmdLines), b.(*v1alpha4.NnfStorageProfileLVMLvChangeCmdLines), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileLVMLvChangeCmdLines)(nil), (*NnfStorageProfileLVMLvChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines(a.(*v1alpha4.NnfStorageProfileLVMLvChangeCmdLines), b.(*NnfStorageProfileLVMLvChangeCmdLines), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLVMVgChangeCmdLines)(nil), (*v1alpha4.NnfStorageProfileLVMVgChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines(a.(*NnfStorageProfileLVMVgChangeCmdLines), b.(*v1alpha4.NnfStorageProfileLVMVgChangeCmdLines), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileLVMVgChangeCmdLines)(nil), (*NnfStorageProfileLVMVgChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines(a.(*v1alpha4.NnfStorageProfileLVMVgChangeCmdLines), b.(*NnfStorageProfileLVMVgChangeCmdLines), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileList)(nil), (*v1alpha4.NnfStorageProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfStorageProfileList_To_v1alpha4_NnfStorageProfileList(a.(*NnfStorageProfileList), b.(*v1alpha4.NnfStorageProfileList), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileList)(nil), (*NnfStorageProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileList_To_v1alpha3_NnfStorageProfileList(a.(*v1alpha4.NnfStorageProfileList), b.(*NnfStorageProfileList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLustreCmdLines)(nil), (*v1alpha4.NnfStorageProfileLustreCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(a.(*NnfStorageProfileLustreCmdLines), b.(*v1alpha4.NnfStorageProfileLustreCmdLines), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileLustreCmdLines)(nil), (*NnfStorageProfileLustreCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(a.(*v1alpha4.NnfStorageProfileLustreCmdLines), b.(*NnfStorageProfileLustreCmdLines), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLustreData)(nil), (*v1alpha4.NnfStorageProfileLustreData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfStorageProfileLustreData_To_v1alpha4_NnfStorageProfileLustreData(a.(*NnfStorageProfileLustreData), b.(*v1alpha4.NnfStorageProfileLustreData), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileLustreData)(nil), (*NnfStorageProfileLustreData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileLustreData_To_v1alpha3_NnfStorageProfileLustreData(a.(*v1alpha4.NnfStorageProfileLustreData), b.(*NnfStorageProfileLustreData), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*NnfStorageProfileLustreMiscOptions)(nil), (*v1alpha4.NnfStorageProfileLustreMiscOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(a.(*NnfStorageProfileLustreMiscOptions), b.(*v1alpha4.NnfStorageProfileLustreMiscOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileLustreMiscOptions)(nil), (*NnfStorageProfileLustreMiscOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions(a.(*v1alpha4.NnfStorageProfileLustreMiscOptions), b.(*NnfStorageProfileLustreMiscOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileRawData)(nil), (*v1alpha4.NnfStorageProfileRawData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfStorageProfileRawData_To_v1alpha4_NnfStorageProfileRawData(a.(*NnfStorageProfileRawData), b.(*v1alpha4.NnfStorageProfileRawData), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileRawData)(nil), (*NnfStorageProfileRawData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileRawData_To_v1alpha3_NnfStorageProfileRawData(a.(*v1alpha4.NnfStorageProfileRawData), b.(*NnfStorageProfileRawData), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfStorageProfileXFSData)(nil), (*v1alpha4.NnfStorageProfileXFSData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfStorageProfileXFSData_To_v1alpha4_NnfStorageProfileXFSData(a.(*NnfStorageProfileXFSData), b.(*v1alpha4.NnfStorageProfileXFSData), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileXFSData)(nil), (*NnfStorageProfileXFSData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileXFSData_To_v1alpha3_NnfStorageProfileXFSData(a.(*v1alpha4.NnfStorageProfileXFSData), b.(*NnfStorageProfileXFSData), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfStorageSpec)(nil), (*v1alpha4.NnfStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfStorageSpec_To_v1alpha4_NnfStorageSpec(a.(*NnfStorageSpec), b.(*v1alpha4.NnfStorageSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageSpec)(nil), (*NnfStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageSpec_To_v1alpha3_NnfStorageSpec(a.(*v1alpha4.NnfStorageSpec), b.(*NnfStorageSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfStorageStatus)(nil), (*v1alpha4.NnfStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfStorageStatus_To_v1alpha4_NnfStorageStatus(a.(*NnfStorageStatus), b.(*v1alpha4.NnfStorageStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageStatus)(nil), (*NnfStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageStatus_To_v1alpha3_NnfStorageStatus(a.(*v1alpha4.NnfStorageStatus), b.(*NnfStorageStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfSystemStorage)(nil), (*v1alpha4.NnfSystemStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfSystemStorage_To_v1alpha4_NnfSystemStorage(a.(*NnfSystemStorage), b.(*v1alpha4.NnfSystemStorage), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*v1alpha4.NnfSystemStorage)(nil), (*NnfSystemStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfSystemStorage_To_v1alpha3_NnfSystemStorage(a.(*v1alpha4.NnfSystemStorage), b.(*NnfSystemStorage), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfSystemStorageList)(nil), (*v1alpha4.NnfSystemStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfSystemStorageList_To_v1alpha4_NnfSystemStorageList(a.(*NnfSystemStorageList), b.(*v1alpha4.NnfSystemStorageList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfSystemStorageList)(nil), (*NnfSystemStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfSystemStorageList_To_v1alpha3_NnfSystemStorageList(a.(*v1alpha4.NnfSystemStorageList), b.(*NnfSystemStorageList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfSystemStorageSpec)(nil), (*v1alpha4.NnfSystemStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfSystemStorageSpec_To_v1alpha4_NnfSystemStorageSpec(a.(*NnfSystemStorageSpec), b.(*v1alpha4.NnfSystemStorageSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfSystemStorageSpec)(nil), (*NnfSystemStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec(a.(*v1alpha4.NnfSystemStorageSpec), b.(*NnfSystemStorageSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NnfSystemStorageStatus)(nil), (*v1alpha4.NnfSystemStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha3_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus(a.(*NnfSystemStorageStatus), b.(*v1alpha4.NnfSystemStorageStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfSystemStorageStatus)(nil), (*NnfSystemStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfSystemStorageStatus_To_v1alpha3_NnfSystemStorageStatus(a.(*v1alpha4.NnfSystemStorageStatus), b.(*NnfSystemStorageStatus), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1alpha3_LustreStorageSpec_To_v1alpha4_LustreStorageSpec(in *LustreStorageSpec, out *v1alpha4.LustreStorageSpec, s conversion.Scope) error { + out.FileSystemName = in.FileSystemName + out.TargetType = in.TargetType + out.StartIndex = in.StartIndex + out.MgsAddress = in.MgsAddress + out.BackFs = in.BackFs + return nil +} + +// Convert_v1alpha3_LustreStorageSpec_To_v1alpha4_LustreStorageSpec is an autogenerated conversion function. +func Convert_v1alpha3_LustreStorageSpec_To_v1alpha4_LustreStorageSpec(in *LustreStorageSpec, out *v1alpha4.LustreStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha3_LustreStorageSpec_To_v1alpha4_LustreStorageSpec(in, out, s) +} + +func autoConvert_v1alpha4_LustreStorageSpec_To_v1alpha3_LustreStorageSpec(in *v1alpha4.LustreStorageSpec, out *LustreStorageSpec, s conversion.Scope) error { + out.FileSystemName = in.FileSystemName + out.TargetType = in.TargetType + out.StartIndex = in.StartIndex + out.MgsAddress = in.MgsAddress + out.BackFs = in.BackFs + return nil +} + +// Convert_v1alpha4_LustreStorageSpec_To_v1alpha3_LustreStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_LustreStorageSpec_To_v1alpha3_LustreStorageSpec(in *v1alpha4.LustreStorageSpec, out *LustreStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_LustreStorageSpec_To_v1alpha3_LustreStorageSpec(in, out, s) +} + +func autoConvert_v1alpha3_NnfAccess_To_v1alpha4_NnfAccess(in *NnfAccess, out *v1alpha4.NnfAccess, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha3_NnfAccessSpec_To_v1alpha4_NnfAccessSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha3_NnfAccessStatus_To_v1alpha4_NnfAccessStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha3_NnfAccess_To_v1alpha4_NnfAccess is an autogenerated conversion function. +func Convert_v1alpha3_NnfAccess_To_v1alpha4_NnfAccess(in *NnfAccess, out *v1alpha4.NnfAccess, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfAccess_To_v1alpha4_NnfAccess(in, out, s) +} + +func autoConvert_v1alpha4_NnfAccess_To_v1alpha3_NnfAccess(in *v1alpha4.NnfAccess, out *NnfAccess, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha4_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha4_NnfAccessStatus_To_v1alpha3_NnfAccessStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha4_NnfAccess_To_v1alpha3_NnfAccess is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfAccess_To_v1alpha3_NnfAccess(in *v1alpha4.NnfAccess, out *NnfAccess, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfAccess_To_v1alpha3_NnfAccess(in, out, s) +} + +func autoConvert_v1alpha3_NnfAccessList_To_v1alpha4_NnfAccessList(in *NnfAccessList, out *v1alpha4.NnfAccessList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha4.NnfAccess)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha3_NnfAccessList_To_v1alpha4_NnfAccessList is an autogenerated conversion function. +func Convert_v1alpha3_NnfAccessList_To_v1alpha4_NnfAccessList(in *NnfAccessList, out *v1alpha4.NnfAccessList, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfAccessList_To_v1alpha4_NnfAccessList(in, out, s) +} + +func autoConvert_v1alpha4_NnfAccessList_To_v1alpha3_NnfAccessList(in *v1alpha4.NnfAccessList, out *NnfAccessList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NnfAccess)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha4_NnfAccessList_To_v1alpha3_NnfAccessList is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfAccessList_To_v1alpha3_NnfAccessList(in *v1alpha4.NnfAccessList, out *NnfAccessList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfAccessList_To_v1alpha3_NnfAccessList(in, out, s) +} + +func autoConvert_v1alpha3_NnfAccessSpec_To_v1alpha4_NnfAccessSpec(in *NnfAccessSpec, out *v1alpha4.NnfAccessSpec, s conversion.Scope) error { + out.DesiredState = in.DesiredState + out.TeardownState = v1alpha2.WorkflowState(in.TeardownState) + out.Target = in.Target + out.UserID = in.UserID + out.GroupID = in.GroupID + out.ClientReference = in.ClientReference + out.MountPath = in.MountPath + out.MakeClientMounts = in.MakeClientMounts + out.MountPathPrefix = in.MountPathPrefix + out.StorageReference = in.StorageReference + return nil +} + +// Convert_v1alpha3_NnfAccessSpec_To_v1alpha4_NnfAccessSpec is an autogenerated conversion function. +func Convert_v1alpha3_NnfAccessSpec_To_v1alpha4_NnfAccessSpec(in *NnfAccessSpec, out *v1alpha4.NnfAccessSpec, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfAccessSpec_To_v1alpha4_NnfAccessSpec(in, out, s) +} + +func autoConvert_v1alpha4_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(in *v1alpha4.NnfAccessSpec, out *NnfAccessSpec, s conversion.Scope) error { + out.DesiredState = in.DesiredState + out.TeardownState = v1alpha2.WorkflowState(in.TeardownState) + out.Target = in.Target + out.UserID = in.UserID + out.GroupID = in.GroupID + out.ClientReference = in.ClientReference + out.MountPath = in.MountPath + out.MakeClientMounts = in.MakeClientMounts + out.MountPathPrefix = in.MountPathPrefix + out.StorageReference = in.StorageReference + return nil +} + +// Convert_v1alpha4_NnfAccessSpec_To_v1alpha3_NnfAccessSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(in *v1alpha4.NnfAccessSpec, out *NnfAccessSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(in, out, s) +} + +func autoConvert_v1alpha3_NnfAccessStatus_To_v1alpha4_NnfAccessStatus(in *NnfAccessStatus, out *v1alpha4.NnfAccessStatus, s conversion.Scope) error { + out.State = in.State + out.Ready = in.Ready + out.ResourceError = in.ResourceError + return nil +} + +// Convert_v1alpha3_NnfAccessStatus_To_v1alpha4_NnfAccessStatus is an autogenerated conversion function. +func Convert_v1alpha3_NnfAccessStatus_To_v1alpha4_NnfAccessStatus(in *NnfAccessStatus, out *v1alpha4.NnfAccessStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfAccessStatus_To_v1alpha4_NnfAccessStatus(in, out, s) +} + +func autoConvert_v1alpha4_NnfAccessStatus_To_v1alpha3_NnfAccessStatus(in *v1alpha4.NnfAccessStatus, out *NnfAccessStatus, s conversion.Scope) error { + out.State = in.State + out.Ready = in.Ready + out.ResourceError = in.ResourceError + return nil +} + +// Convert_v1alpha4_NnfAccessStatus_To_v1alpha3_NnfAccessStatus is an autogenerated conversion function. +func Convert_v1alpha4_NnfAccessStatus_To_v1alpha3_NnfAccessStatus(in *v1alpha4.NnfAccessStatus, out *NnfAccessStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfAccessStatus_To_v1alpha3_NnfAccessStatus(in, out, s) +} + +func autoConvert_v1alpha3_NnfContainerProfile_To_v1alpha4_NnfContainerProfile(in *NnfContainerProfile, out *v1alpha4.NnfContainerProfile, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha3_NnfContainerProfileData_To_v1alpha4_NnfContainerProfileData(&in.Data, &out.Data, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha3_NnfContainerProfile_To_v1alpha4_NnfContainerProfile is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfContainerProfile_To_v1alpha4_NnfContainerProfile(in *NnfContainerProfile, out *v1alpha4.NnfContainerProfile, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfContainerProfile_To_v1alpha4_NnfContainerProfile(in, out, s) +} + +func autoConvert_v1alpha4_NnfContainerProfile_To_v1alpha3_NnfContainerProfile(in *v1alpha4.NnfContainerProfile, out *NnfContainerProfile, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha4_NnfContainerProfileData_To_v1alpha3_NnfContainerProfileData(&in.Data, &out.Data, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha4_NnfContainerProfile_To_v1alpha3_NnfContainerProfile is an autogenerated conversion function. +func Convert_v1alpha4_NnfContainerProfile_To_v1alpha3_NnfContainerProfile(in *v1alpha4.NnfContainerProfile, out *NnfContainerProfile, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfContainerProfile_To_v1alpha3_NnfContainerProfile(in, out, s) +} + +func autoConvert_v1alpha3_NnfContainerProfileData_To_v1alpha4_NnfContainerProfileData(in *NnfContainerProfileData, out *v1alpha4.NnfContainerProfileData, s conversion.Scope) error { + out.Pinned = in.Pinned + out.Storages = *(*[]v1alpha4.NnfContainerProfileStorage)(unsafe.Pointer(&in.Storages)) + out.PreRunTimeoutSeconds = (*int64)(unsafe.Pointer(in.PreRunTimeoutSeconds)) + out.PostRunTimeoutSeconds = (*int64)(unsafe.Pointer(in.PostRunTimeoutSeconds)) + out.RetryLimit = in.RetryLimit + out.UserID = (*uint32)(unsafe.Pointer(in.UserID)) + out.GroupID = (*uint32)(unsafe.Pointer(in.GroupID)) + out.NumPorts = in.NumPorts + out.Spec = (*v1.PodSpec)(unsafe.Pointer(in.Spec)) + out.MPISpec = (*v2beta1.MPIJobSpec)(unsafe.Pointer(in.MPISpec)) + return nil +} + +// Convert_v1alpha3_NnfContainerProfileData_To_v1alpha4_NnfContainerProfileData is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfContainerProfileData_To_v1alpha4_NnfContainerProfileData(in *NnfContainerProfileData, out *v1alpha4.NnfContainerProfileData, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfContainerProfileData_To_v1alpha4_NnfContainerProfileData(in, out, s) +} + +func autoConvert_v1alpha4_NnfContainerProfileData_To_v1alpha3_NnfContainerProfileData(in *v1alpha4.NnfContainerProfileData, out *NnfContainerProfileData, s conversion.Scope) error { + out.Pinned = in.Pinned + out.Storages = *(*[]NnfContainerProfileStorage)(unsafe.Pointer(&in.Storages)) + out.PreRunTimeoutSeconds = (*int64)(unsafe.Pointer(in.PreRunTimeoutSeconds)) + out.PostRunTimeoutSeconds = (*int64)(unsafe.Pointer(in.PostRunTimeoutSeconds)) + out.RetryLimit = in.RetryLimit + out.UserID = (*uint32)(unsafe.Pointer(in.UserID)) + out.GroupID = (*uint32)(unsafe.Pointer(in.GroupID)) + out.NumPorts = in.NumPorts + out.Spec = (*v1.PodSpec)(unsafe.Pointer(in.Spec)) + out.MPISpec = (*v2beta1.MPIJobSpec)(unsafe.Pointer(in.MPISpec)) + return nil +} + +// Convert_v1alpha4_NnfContainerProfileData_To_v1alpha3_NnfContainerProfileData is an autogenerated conversion function. +func Convert_v1alpha4_NnfContainerProfileData_To_v1alpha3_NnfContainerProfileData(in *v1alpha4.NnfContainerProfileData, out *NnfContainerProfileData, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfContainerProfileData_To_v1alpha3_NnfContainerProfileData(in, out, s) +} + +func autoConvert_v1alpha3_NnfContainerProfileList_To_v1alpha4_NnfContainerProfileList(in *NnfContainerProfileList, out *v1alpha4.NnfContainerProfileList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha4.NnfContainerProfile)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha3_NnfContainerProfileList_To_v1alpha4_NnfContainerProfileList is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfContainerProfileList_To_v1alpha4_NnfContainerProfileList(in *NnfContainerProfileList, out *v1alpha4.NnfContainerProfileList, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfContainerProfileList_To_v1alpha4_NnfContainerProfileList(in, out, s) +} + +func autoConvert_v1alpha4_NnfContainerProfileList_To_v1alpha3_NnfContainerProfileList(in *v1alpha4.NnfContainerProfileList, out *NnfContainerProfileList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NnfContainerProfile)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha4_NnfContainerProfileList_To_v1alpha3_NnfContainerProfileList is an autogenerated conversion function. +func Convert_v1alpha4_NnfContainerProfileList_To_v1alpha3_NnfContainerProfileList(in *v1alpha4.NnfContainerProfileList, out *NnfContainerProfileList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfContainerProfileList_To_v1alpha3_NnfContainerProfileList(in, out, s) +} + +func autoConvert_v1alpha3_NnfContainerProfileStorage_To_v1alpha4_NnfContainerProfileStorage(in *NnfContainerProfileStorage, out *v1alpha4.NnfContainerProfileStorage, s conversion.Scope) error { + out.Name = in.Name + out.Optional = in.Optional + out.PVCMode = v1.PersistentVolumeAccessMode(in.PVCMode) + return nil +} + +// Convert_v1alpha3_NnfContainerProfileStorage_To_v1alpha4_NnfContainerProfileStorage is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfContainerProfileStorage_To_v1alpha4_NnfContainerProfileStorage(in *NnfContainerProfileStorage, out *v1alpha4.NnfContainerProfileStorage, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfContainerProfileStorage_To_v1alpha4_NnfContainerProfileStorage(in, out, s) +} + +func autoConvert_v1alpha4_NnfContainerProfileStorage_To_v1alpha3_NnfContainerProfileStorage(in *v1alpha4.NnfContainerProfileStorage, out *NnfContainerProfileStorage, s conversion.Scope) error { + out.Name = in.Name + out.Optional = in.Optional + out.PVCMode = v1.PersistentVolumeAccessMode(in.PVCMode) + return nil +} + +// Convert_v1alpha4_NnfContainerProfileStorage_To_v1alpha3_NnfContainerProfileStorage is an autogenerated conversion function. +func Convert_v1alpha4_NnfContainerProfileStorage_To_v1alpha3_NnfContainerProfileStorage(in *v1alpha4.NnfContainerProfileStorage, out *NnfContainerProfileStorage, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfContainerProfileStorage_To_v1alpha3_NnfContainerProfileStorage(in, out, s) +} + +func autoConvert_v1alpha3_NnfDataMovement_To_v1alpha4_NnfDataMovement(in *NnfDataMovement, out *v1alpha4.NnfDataMovement, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha3_NnfDataMovementSpec_To_v1alpha4_NnfDataMovementSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha3_NnfDataMovementStatus_To_v1alpha4_NnfDataMovementStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha3_NnfDataMovement_To_v1alpha4_NnfDataMovement is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfDataMovement_To_v1alpha4_NnfDataMovement(in *NnfDataMovement, out *v1alpha4.NnfDataMovement, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfDataMovement_To_v1alpha4_NnfDataMovement(in, out, s) +} + +func autoConvert_v1alpha4_NnfDataMovement_To_v1alpha3_NnfDataMovement(in *v1alpha4.NnfDataMovement, out *NnfDataMovement, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha4_NnfDataMovementSpec_To_v1alpha3_NnfDataMovementSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha4_NnfDataMovementStatus_To_v1alpha3_NnfDataMovementStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha4_NnfDataMovement_To_v1alpha3_NnfDataMovement is an autogenerated conversion function. +func Convert_v1alpha4_NnfDataMovement_To_v1alpha3_NnfDataMovement(in *v1alpha4.NnfDataMovement, out *NnfDataMovement, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovement_To_v1alpha3_NnfDataMovement(in, out, s) +} + +func autoConvert_v1alpha3_NnfDataMovementCommandStatus_To_v1alpha4_NnfDataMovementCommandStatus(in *NnfDataMovementCommandStatus, out *v1alpha4.NnfDataMovementCommandStatus, s conversion.Scope) error { + out.Command = in.Command + out.ElapsedTime = in.ElapsedTime + out.ProgressPercentage = (*int32)(unsafe.Pointer(in.ProgressPercentage)) + out.LastMessage = in.LastMessage + out.LastMessageTime = in.LastMessageTime + out.Seconds = in.Seconds + out.Items = (*int32)(unsafe.Pointer(in.Items)) + out.Directories = (*int32)(unsafe.Pointer(in.Directories)) + out.Files = (*int32)(unsafe.Pointer(in.Files)) + out.Links = (*int32)(unsafe.Pointer(in.Links)) + out.Data = in.Data + out.Rate = in.Rate + return nil +} + +// Convert_v1alpha3_NnfDataMovementCommandStatus_To_v1alpha4_NnfDataMovementCommandStatus is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfDataMovementCommandStatus_To_v1alpha4_NnfDataMovementCommandStatus(in *NnfDataMovementCommandStatus, out *v1alpha4.NnfDataMovementCommandStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfDataMovementCommandStatus_To_v1alpha4_NnfDataMovementCommandStatus(in, out, s) +} + +func autoConvert_v1alpha4_NnfDataMovementCommandStatus_To_v1alpha3_NnfDataMovementCommandStatus(in *v1alpha4.NnfDataMovementCommandStatus, out *NnfDataMovementCommandStatus, s conversion.Scope) error { + out.Command = in.Command + out.ElapsedTime = in.ElapsedTime + out.ProgressPercentage = (*int32)(unsafe.Pointer(in.ProgressPercentage)) + out.LastMessage = in.LastMessage + out.LastMessageTime = in.LastMessageTime + out.Seconds = in.Seconds + out.Items = (*int32)(unsafe.Pointer(in.Items)) + out.Directories = (*int32)(unsafe.Pointer(in.Directories)) + out.Files = (*int32)(unsafe.Pointer(in.Files)) + out.Links = (*int32)(unsafe.Pointer(in.Links)) + out.Data = in.Data + out.Rate = in.Rate + return nil +} + +// Convert_v1alpha4_NnfDataMovementCommandStatus_To_v1alpha3_NnfDataMovementCommandStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementCommandStatus_To_v1alpha3_NnfDataMovementCommandStatus(in *v1alpha4.NnfDataMovementCommandStatus, out *NnfDataMovementCommandStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementCommandStatus_To_v1alpha3_NnfDataMovementCommandStatus(in, out, s) +} + +func autoConvert_v1alpha3_NnfDataMovementConfig_To_v1alpha4_NnfDataMovementConfig(in *NnfDataMovementConfig, out *v1alpha4.NnfDataMovementConfig, s conversion.Scope) error { + out.Dryrun = in.Dryrun + out.MpirunOptions = in.MpirunOptions + out.DcpOptions = in.DcpOptions + out.LogStdout = in.LogStdout + out.StoreStdout = in.StoreStdout + out.Slots = (*int)(unsafe.Pointer(in.Slots)) + out.MaxSlots = (*int)(unsafe.Pointer(in.MaxSlots)) + return nil +} + +// Convert_v1alpha3_NnfDataMovementConfig_To_v1alpha4_NnfDataMovementConfig is an autogenerated conversion function. +func Convert_v1alpha3_NnfDataMovementConfig_To_v1alpha4_NnfDataMovementConfig(in *NnfDataMovementConfig, out *v1alpha4.NnfDataMovementConfig, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfDataMovementConfig_To_v1alpha4_NnfDataMovementConfig(in, out, s) +} + +func autoConvert_v1alpha4_NnfDataMovementConfig_To_v1alpha3_NnfDataMovementConfig(in *v1alpha4.NnfDataMovementConfig, out *NnfDataMovementConfig, s conversion.Scope) error { + out.Dryrun = in.Dryrun + out.MpirunOptions = in.MpirunOptions + out.DcpOptions = in.DcpOptions + out.LogStdout = in.LogStdout + out.StoreStdout = in.StoreStdout + out.Slots = (*int)(unsafe.Pointer(in.Slots)) + out.MaxSlots = (*int)(unsafe.Pointer(in.MaxSlots)) + return nil +} + +// Convert_v1alpha4_NnfDataMovementConfig_To_v1alpha3_NnfDataMovementConfig is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementConfig_To_v1alpha3_NnfDataMovementConfig(in *v1alpha4.NnfDataMovementConfig, out *NnfDataMovementConfig, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementConfig_To_v1alpha3_NnfDataMovementConfig(in, out, s) +} + +func autoConvert_v1alpha3_NnfDataMovementList_To_v1alpha4_NnfDataMovementList(in *NnfDataMovementList, out *v1alpha4.NnfDataMovementList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha4.NnfDataMovement)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha3_NnfDataMovementList_To_v1alpha4_NnfDataMovementList is an autogenerated conversion function. +func Convert_v1alpha3_NnfDataMovementList_To_v1alpha4_NnfDataMovementList(in *NnfDataMovementList, out *v1alpha4.NnfDataMovementList, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfDataMovementList_To_v1alpha4_NnfDataMovementList(in, out, s) +} + +func autoConvert_v1alpha4_NnfDataMovementList_To_v1alpha3_NnfDataMovementList(in *v1alpha4.NnfDataMovementList, out *NnfDataMovementList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NnfDataMovement)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha4_NnfDataMovementList_To_v1alpha3_NnfDataMovementList is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementList_To_v1alpha3_NnfDataMovementList(in *v1alpha4.NnfDataMovementList, out *NnfDataMovementList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementList_To_v1alpha3_NnfDataMovementList(in, out, s) +} + +func autoConvert_v1alpha3_NnfDataMovementManager_To_v1alpha4_NnfDataMovementManager(in *NnfDataMovementManager, out *v1alpha4.NnfDataMovementManager, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha3_NnfDataMovementManagerSpec_To_v1alpha4_NnfDataMovementManagerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha3_NnfDataMovementManagerStatus_To_v1alpha4_NnfDataMovementManagerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha3_NnfDataMovementManager_To_v1alpha4_NnfDataMovementManager is an autogenerated conversion function. +func Convert_v1alpha3_NnfDataMovementManager_To_v1alpha4_NnfDataMovementManager(in *NnfDataMovementManager, out *v1alpha4.NnfDataMovementManager, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfDataMovementManager_To_v1alpha4_NnfDataMovementManager(in, out, s) +} + +func autoConvert_v1alpha4_NnfDataMovementManager_To_v1alpha3_NnfDataMovementManager(in *v1alpha4.NnfDataMovementManager, out *NnfDataMovementManager, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha4_NnfDataMovementManagerSpec_To_v1alpha3_NnfDataMovementManagerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha4_NnfDataMovementManagerStatus_To_v1alpha3_NnfDataMovementManagerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha4_NnfDataMovementManager_To_v1alpha3_NnfDataMovementManager is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementManager_To_v1alpha3_NnfDataMovementManager(in *v1alpha4.NnfDataMovementManager, out *NnfDataMovementManager, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementManager_To_v1alpha3_NnfDataMovementManager(in, out, s) +} + +func autoConvert_v1alpha3_NnfDataMovementManagerList_To_v1alpha4_NnfDataMovementManagerList(in *NnfDataMovementManagerList, out *v1alpha4.NnfDataMovementManagerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha4.NnfDataMovementManager)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha3_NnfDataMovementManagerList_To_v1alpha4_NnfDataMovementManagerList is an autogenerated conversion function. +func Convert_v1alpha3_NnfDataMovementManagerList_To_v1alpha4_NnfDataMovementManagerList(in *NnfDataMovementManagerList, out *v1alpha4.NnfDataMovementManagerList, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfDataMovementManagerList_To_v1alpha4_NnfDataMovementManagerList(in, out, s) +} + +func autoConvert_v1alpha4_NnfDataMovementManagerList_To_v1alpha3_NnfDataMovementManagerList(in *v1alpha4.NnfDataMovementManagerList, out *NnfDataMovementManagerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NnfDataMovementManager)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha4_NnfDataMovementManagerList_To_v1alpha3_NnfDataMovementManagerList is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementManagerList_To_v1alpha3_NnfDataMovementManagerList(in *v1alpha4.NnfDataMovementManagerList, out *NnfDataMovementManagerList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementManagerList_To_v1alpha3_NnfDataMovementManagerList(in, out, s) +} + +func autoConvert_v1alpha3_NnfDataMovementManagerSpec_To_v1alpha4_NnfDataMovementManagerSpec(in *NnfDataMovementManagerSpec, out *v1alpha4.NnfDataMovementManagerSpec, s conversion.Scope) error { + out.Selector = in.Selector + out.Template = in.Template + out.UpdateStrategy = in.UpdateStrategy + out.HostPath = in.HostPath + out.MountPath = in.MountPath + return nil +} + +// Convert_v1alpha3_NnfDataMovementManagerSpec_To_v1alpha4_NnfDataMovementManagerSpec is an autogenerated conversion function. +func Convert_v1alpha3_NnfDataMovementManagerSpec_To_v1alpha4_NnfDataMovementManagerSpec(in *NnfDataMovementManagerSpec, out *v1alpha4.NnfDataMovementManagerSpec, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfDataMovementManagerSpec_To_v1alpha4_NnfDataMovementManagerSpec(in, out, s) +} + +func autoConvert_v1alpha4_NnfDataMovementManagerSpec_To_v1alpha3_NnfDataMovementManagerSpec(in *v1alpha4.NnfDataMovementManagerSpec, out *NnfDataMovementManagerSpec, s conversion.Scope) error { + out.Selector = in.Selector + out.Template = in.Template + out.UpdateStrategy = in.UpdateStrategy + out.HostPath = in.HostPath + out.MountPath = in.MountPath + return nil +} + +// Convert_v1alpha4_NnfDataMovementManagerSpec_To_v1alpha3_NnfDataMovementManagerSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementManagerSpec_To_v1alpha3_NnfDataMovementManagerSpec(in *v1alpha4.NnfDataMovementManagerSpec, out *NnfDataMovementManagerSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementManagerSpec_To_v1alpha3_NnfDataMovementManagerSpec(in, out, s) +} + +func autoConvert_v1alpha3_NnfDataMovementManagerStatus_To_v1alpha4_NnfDataMovementManagerStatus(in *NnfDataMovementManagerStatus, out *v1alpha4.NnfDataMovementManagerStatus, s conversion.Scope) error { + out.Ready = in.Ready + return nil +} + +// Convert_v1alpha3_NnfDataMovementManagerStatus_To_v1alpha4_NnfDataMovementManagerStatus is an autogenerated conversion function. +func Convert_v1alpha3_NnfDataMovementManagerStatus_To_v1alpha4_NnfDataMovementManagerStatus(in *NnfDataMovementManagerStatus, out *v1alpha4.NnfDataMovementManagerStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfDataMovementManagerStatus_To_v1alpha4_NnfDataMovementManagerStatus(in, out, s) +} + +func autoConvert_v1alpha4_NnfDataMovementManagerStatus_To_v1alpha3_NnfDataMovementManagerStatus(in *v1alpha4.NnfDataMovementManagerStatus, out *NnfDataMovementManagerStatus, s conversion.Scope) error { + out.Ready = in.Ready + return nil +} + +// Convert_v1alpha4_NnfDataMovementManagerStatus_To_v1alpha3_NnfDataMovementManagerStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementManagerStatus_To_v1alpha3_NnfDataMovementManagerStatus(in *v1alpha4.NnfDataMovementManagerStatus, out *NnfDataMovementManagerStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementManagerStatus_To_v1alpha3_NnfDataMovementManagerStatus(in, out, s) +} + +func autoConvert_v1alpha3_NnfDataMovementProfile_To_v1alpha4_NnfDataMovementProfile(in *NnfDataMovementProfile, out *v1alpha4.NnfDataMovementProfile, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha3_NnfDataMovementProfileData_To_v1alpha4_NnfDataMovementProfileData(&in.Data, &out.Data, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha3_NnfDataMovementProfile_To_v1alpha4_NnfDataMovementProfile is an autogenerated conversion function. +func Convert_v1alpha3_NnfDataMovementProfile_To_v1alpha4_NnfDataMovementProfile(in *NnfDataMovementProfile, out *v1alpha4.NnfDataMovementProfile, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfDataMovementProfile_To_v1alpha4_NnfDataMovementProfile(in, out, s) +} + +func autoConvert_v1alpha4_NnfDataMovementProfile_To_v1alpha3_NnfDataMovementProfile(in *v1alpha4.NnfDataMovementProfile, out *NnfDataMovementProfile, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData(&in.Data, &out.Data, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha4_NnfDataMovementProfile_To_v1alpha3_NnfDataMovementProfile is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementProfile_To_v1alpha3_NnfDataMovementProfile(in *v1alpha4.NnfDataMovementProfile, out *NnfDataMovementProfile, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementProfile_To_v1alpha3_NnfDataMovementProfile(in, out, s) +} + +func autoConvert_v1alpha3_NnfDataMovementProfileData_To_v1alpha4_NnfDataMovementProfileData(in *NnfDataMovementProfileData, out *v1alpha4.NnfDataMovementProfileData, s conversion.Scope) error { + out.Default = in.Default + out.Pinned = in.Pinned + out.Slots = in.Slots + out.MaxSlots = in.MaxSlots + out.Command = in.Command + out.LogStdout = in.LogStdout + out.StoreStdout = in.StoreStdout + out.ProgressIntervalSeconds = in.ProgressIntervalSeconds + out.CreateDestDir = in.CreateDestDir + out.StatCommand = in.StatCommand + return nil +} + +// Convert_v1alpha3_NnfDataMovementProfileData_To_v1alpha4_NnfDataMovementProfileData is an autogenerated conversion function. +func Convert_v1alpha3_NnfDataMovementProfileData_To_v1alpha4_NnfDataMovementProfileData(in *NnfDataMovementProfileData, out *v1alpha4.NnfDataMovementProfileData, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfDataMovementProfileData_To_v1alpha4_NnfDataMovementProfileData(in, out, s) +} + +func autoConvert_v1alpha4_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData(in *v1alpha4.NnfDataMovementProfileData, out *NnfDataMovementProfileData, s conversion.Scope) error { + out.Default = in.Default + out.Pinned = in.Pinned + out.Slots = in.Slots + out.MaxSlots = in.MaxSlots + out.Command = in.Command + out.LogStdout = in.LogStdout + out.StoreStdout = in.StoreStdout + out.ProgressIntervalSeconds = in.ProgressIntervalSeconds + out.CreateDestDir = in.CreateDestDir + out.StatCommand = in.StatCommand + return nil +} + +// Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData(in *v1alpha4.NnfDataMovementProfileData, out *NnfDataMovementProfileData, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData(in, out, s) +} + +func autoConvert_v1alpha3_NnfDataMovementProfileList_To_v1alpha4_NnfDataMovementProfileList(in *NnfDataMovementProfileList, out *v1alpha4.NnfDataMovementProfileList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha4.NnfDataMovementProfile)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha3_NnfDataMovementProfileList_To_v1alpha4_NnfDataMovementProfileList is an autogenerated conversion function. +func Convert_v1alpha3_NnfDataMovementProfileList_To_v1alpha4_NnfDataMovementProfileList(in *NnfDataMovementProfileList, out *v1alpha4.NnfDataMovementProfileList, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfDataMovementProfileList_To_v1alpha4_NnfDataMovementProfileList(in, out, s) +} + +func autoConvert_v1alpha4_NnfDataMovementProfileList_To_v1alpha3_NnfDataMovementProfileList(in *v1alpha4.NnfDataMovementProfileList, out *NnfDataMovementProfileList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NnfDataMovementProfile)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha4_NnfDataMovementProfileList_To_v1alpha3_NnfDataMovementProfileList is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementProfileList_To_v1alpha3_NnfDataMovementProfileList(in *v1alpha4.NnfDataMovementProfileList, out *NnfDataMovementProfileList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementProfileList_To_v1alpha3_NnfDataMovementProfileList(in, out, s) +} + +func autoConvert_v1alpha3_NnfDataMovementSpec_To_v1alpha4_NnfDataMovementSpec(in *NnfDataMovementSpec, out *v1alpha4.NnfDataMovementSpec, s conversion.Scope) error { + out.Source = (*v1alpha4.NnfDataMovementSpecSourceDestination)(unsafe.Pointer(in.Source)) + out.Destination = (*v1alpha4.NnfDataMovementSpecSourceDestination)(unsafe.Pointer(in.Destination)) + out.UserId = in.UserId + out.GroupId = in.GroupId + out.Cancel = in.Cancel + out.ProfileReference = in.ProfileReference + out.UserConfig = (*v1alpha4.NnfDataMovementConfig)(unsafe.Pointer(in.UserConfig)) + return nil +} + +// Convert_v1alpha3_NnfDataMovementSpec_To_v1alpha4_NnfDataMovementSpec is an autogenerated conversion function. +func Convert_v1alpha3_NnfDataMovementSpec_To_v1alpha4_NnfDataMovementSpec(in *NnfDataMovementSpec, out *v1alpha4.NnfDataMovementSpec, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfDataMovementSpec_To_v1alpha4_NnfDataMovementSpec(in, out, s) +} + +func autoConvert_v1alpha4_NnfDataMovementSpec_To_v1alpha3_NnfDataMovementSpec(in *v1alpha4.NnfDataMovementSpec, out *NnfDataMovementSpec, s conversion.Scope) error { + out.Source = (*NnfDataMovementSpecSourceDestination)(unsafe.Pointer(in.Source)) + out.Destination = (*NnfDataMovementSpecSourceDestination)(unsafe.Pointer(in.Destination)) + out.UserId = in.UserId + out.GroupId = in.GroupId + out.Cancel = in.Cancel + out.ProfileReference = in.ProfileReference + out.UserConfig = (*NnfDataMovementConfig)(unsafe.Pointer(in.UserConfig)) + return nil +} + +// Convert_v1alpha4_NnfDataMovementSpec_To_v1alpha3_NnfDataMovementSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementSpec_To_v1alpha3_NnfDataMovementSpec(in *v1alpha4.NnfDataMovementSpec, out *NnfDataMovementSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementSpec_To_v1alpha3_NnfDataMovementSpec(in, out, s) +} + +func autoConvert_v1alpha3_NnfDataMovementSpecSourceDestination_To_v1alpha4_NnfDataMovementSpecSourceDestination(in *NnfDataMovementSpecSourceDestination, out *v1alpha4.NnfDataMovementSpecSourceDestination, s conversion.Scope) error { + out.Path = in.Path + out.StorageReference = in.StorageReference + return nil +} + +// Convert_v1alpha3_NnfDataMovementSpecSourceDestination_To_v1alpha4_NnfDataMovementSpecSourceDestination is an autogenerated conversion function. +func Convert_v1alpha3_NnfDataMovementSpecSourceDestination_To_v1alpha4_NnfDataMovementSpecSourceDestination(in *NnfDataMovementSpecSourceDestination, out *v1alpha4.NnfDataMovementSpecSourceDestination, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfDataMovementSpecSourceDestination_To_v1alpha4_NnfDataMovementSpecSourceDestination(in, out, s) +} + +func autoConvert_v1alpha4_NnfDataMovementSpecSourceDestination_To_v1alpha3_NnfDataMovementSpecSourceDestination(in *v1alpha4.NnfDataMovementSpecSourceDestination, out *NnfDataMovementSpecSourceDestination, s conversion.Scope) error { + out.Path = in.Path + out.StorageReference = in.StorageReference + return nil +} + +// Convert_v1alpha4_NnfDataMovementSpecSourceDestination_To_v1alpha3_NnfDataMovementSpecSourceDestination is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementSpecSourceDestination_To_v1alpha3_NnfDataMovementSpecSourceDestination(in *v1alpha4.NnfDataMovementSpecSourceDestination, out *NnfDataMovementSpecSourceDestination, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementSpecSourceDestination_To_v1alpha3_NnfDataMovementSpecSourceDestination(in, out, s) +} + +func autoConvert_v1alpha3_NnfDataMovementStatus_To_v1alpha4_NnfDataMovementStatus(in *NnfDataMovementStatus, out *v1alpha4.NnfDataMovementStatus, s conversion.Scope) error { + out.State = in.State + out.Status = in.Status + out.Message = in.Message + out.StartTime = (*metav1.MicroTime)(unsafe.Pointer(in.StartTime)) + out.EndTime = (*metav1.MicroTime)(unsafe.Pointer(in.EndTime)) + out.Restarts = in.Restarts + out.CommandStatus = (*v1alpha4.NnfDataMovementCommandStatus)(unsafe.Pointer(in.CommandStatus)) + out.ResourceError = in.ResourceError + return nil +} + +// Convert_v1alpha3_NnfDataMovementStatus_To_v1alpha4_NnfDataMovementStatus is an autogenerated conversion function. +func Convert_v1alpha3_NnfDataMovementStatus_To_v1alpha4_NnfDataMovementStatus(in *NnfDataMovementStatus, out *v1alpha4.NnfDataMovementStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfDataMovementStatus_To_v1alpha4_NnfDataMovementStatus(in, out, s) +} + +func autoConvert_v1alpha4_NnfDataMovementStatus_To_v1alpha3_NnfDataMovementStatus(in *v1alpha4.NnfDataMovementStatus, out *NnfDataMovementStatus, s conversion.Scope) error { + out.State = in.State + out.Status = in.Status + out.Message = in.Message + out.StartTime = (*metav1.MicroTime)(unsafe.Pointer(in.StartTime)) + out.EndTime = (*metav1.MicroTime)(unsafe.Pointer(in.EndTime)) + out.Restarts = in.Restarts + out.CommandStatus = (*NnfDataMovementCommandStatus)(unsafe.Pointer(in.CommandStatus)) + out.ResourceError = in.ResourceError + return nil +} + +// Convert_v1alpha4_NnfDataMovementStatus_To_v1alpha3_NnfDataMovementStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDataMovementStatus_To_v1alpha3_NnfDataMovementStatus(in *v1alpha4.NnfDataMovementStatus, out *NnfDataMovementStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementStatus_To_v1alpha3_NnfDataMovementStatus(in, out, s) +} + +func autoConvert_v1alpha3_NnfDriveStatus_To_v1alpha4_NnfDriveStatus(in *NnfDriveStatus, out *v1alpha4.NnfDriveStatus, s conversion.Scope) error { + out.Model = in.Model + out.SerialNumber = in.SerialNumber + out.FirmwareVersion = in.FirmwareVersion + out.Slot = in.Slot + out.Capacity = in.Capacity + out.WearLevel = in.WearLevel + if err := Convert_v1alpha3_NnfResourceStatus_To_v1alpha4_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha3_NnfDriveStatus_To_v1alpha4_NnfDriveStatus is an autogenerated conversion function. +func Convert_v1alpha3_NnfDriveStatus_To_v1alpha4_NnfDriveStatus(in *NnfDriveStatus, out *v1alpha4.NnfDriveStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfDriveStatus_To_v1alpha4_NnfDriveStatus(in, out, s) +} + +func autoConvert_v1alpha4_NnfDriveStatus_To_v1alpha3_NnfDriveStatus(in *v1alpha4.NnfDriveStatus, out *NnfDriveStatus, s conversion.Scope) error { + out.Model = in.Model + out.SerialNumber = in.SerialNumber + out.FirmwareVersion = in.FirmwareVersion + out.Slot = in.Slot + out.Capacity = in.Capacity + out.WearLevel = in.WearLevel + if err := Convert_v1alpha4_NnfResourceStatus_To_v1alpha3_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha4_NnfDriveStatus_To_v1alpha3_NnfDriveStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfDriveStatus_To_v1alpha3_NnfDriveStatus(in *v1alpha4.NnfDriveStatus, out *NnfDriveStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfDriveStatus_To_v1alpha3_NnfDriveStatus(in, out, s) +} + +func autoConvert_v1alpha3_NnfLustreMGT_To_v1alpha4_NnfLustreMGT(in *NnfLustreMGT, out *v1alpha4.NnfLustreMGT, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha3_NnfLustreMGTSpec_To_v1alpha4_NnfLustreMGTSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha3_NnfLustreMGTStatus_To_v1alpha4_NnfLustreMGTStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha3_NnfLustreMGT_To_v1alpha4_NnfLustreMGT is an autogenerated conversion function. +func Convert_v1alpha3_NnfLustreMGT_To_v1alpha4_NnfLustreMGT(in *NnfLustreMGT, out *v1alpha4.NnfLustreMGT, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfLustreMGT_To_v1alpha4_NnfLustreMGT(in, out, s) +} + +func autoConvert_v1alpha4_NnfLustreMGT_To_v1alpha3_NnfLustreMGT(in *v1alpha4.NnfLustreMGT, out *NnfLustreMGT, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha4_NnfLustreMGTSpec_To_v1alpha3_NnfLustreMGTSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha4_NnfLustreMGTStatus_To_v1alpha3_NnfLustreMGTStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha4_NnfLustreMGT_To_v1alpha3_NnfLustreMGT is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfLustreMGT_To_v1alpha3_NnfLustreMGT(in *v1alpha4.NnfLustreMGT, out *NnfLustreMGT, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfLustreMGT_To_v1alpha3_NnfLustreMGT(in, out, s) +} + +func autoConvert_v1alpha3_NnfLustreMGTList_To_v1alpha4_NnfLustreMGTList(in *NnfLustreMGTList, out *v1alpha4.NnfLustreMGTList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha4.NnfLustreMGT)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha3_NnfLustreMGTList_To_v1alpha4_NnfLustreMGTList is an autogenerated conversion function. +func Convert_v1alpha3_NnfLustreMGTList_To_v1alpha4_NnfLustreMGTList(in *NnfLustreMGTList, out *v1alpha4.NnfLustreMGTList, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfLustreMGTList_To_v1alpha4_NnfLustreMGTList(in, out, s) +} + +func autoConvert_v1alpha4_NnfLustreMGTList_To_v1alpha3_NnfLustreMGTList(in *v1alpha4.NnfLustreMGTList, out *NnfLustreMGTList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NnfLustreMGT)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha4_NnfLustreMGTList_To_v1alpha3_NnfLustreMGTList is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfLustreMGTList_To_v1alpha3_NnfLustreMGTList(in *v1alpha4.NnfLustreMGTList, out *NnfLustreMGTList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfLustreMGTList_To_v1alpha3_NnfLustreMGTList(in, out, s) +} + +func autoConvert_v1alpha3_NnfLustreMGTSpec_To_v1alpha4_NnfLustreMGTSpec(in *NnfLustreMGTSpec, out *v1alpha4.NnfLustreMGTSpec, s conversion.Scope) error { + out.Addresses = *(*[]string)(unsafe.Pointer(&in.Addresses)) + out.FsNameBlackList = *(*[]string)(unsafe.Pointer(&in.FsNameBlackList)) + out.FsNameStart = in.FsNameStart + out.FsNameStartReference = in.FsNameStartReference + out.ClaimList = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.ClaimList)) + return nil +} + +// Convert_v1alpha3_NnfLustreMGTSpec_To_v1alpha4_NnfLustreMGTSpec is an autogenerated conversion function. +func Convert_v1alpha3_NnfLustreMGTSpec_To_v1alpha4_NnfLustreMGTSpec(in *NnfLustreMGTSpec, out *v1alpha4.NnfLustreMGTSpec, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfLustreMGTSpec_To_v1alpha4_NnfLustreMGTSpec(in, out, s) +} + +func autoConvert_v1alpha4_NnfLustreMGTSpec_To_v1alpha3_NnfLustreMGTSpec(in *v1alpha4.NnfLustreMGTSpec, out *NnfLustreMGTSpec, s conversion.Scope) error { + out.Addresses = *(*[]string)(unsafe.Pointer(&in.Addresses)) + out.FsNameBlackList = *(*[]string)(unsafe.Pointer(&in.FsNameBlackList)) + out.FsNameStart = in.FsNameStart + out.FsNameStartReference = in.FsNameStartReference + out.ClaimList = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.ClaimList)) + return nil +} + +// Convert_v1alpha4_NnfLustreMGTSpec_To_v1alpha3_NnfLustreMGTSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfLustreMGTSpec_To_v1alpha3_NnfLustreMGTSpec(in *v1alpha4.NnfLustreMGTSpec, out *NnfLustreMGTSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfLustreMGTSpec_To_v1alpha3_NnfLustreMGTSpec(in, out, s) +} + +func autoConvert_v1alpha3_NnfLustreMGTStatus_To_v1alpha4_NnfLustreMGTStatus(in *NnfLustreMGTStatus, out *v1alpha4.NnfLustreMGTStatus, s conversion.Scope) error { + out.FsNameNext = in.FsNameNext + out.ClaimList = *(*[]v1alpha4.NnfLustreMGTStatusClaim)(unsafe.Pointer(&in.ClaimList)) + out.ResourceError = in.ResourceError + return nil +} + +// Convert_v1alpha3_NnfLustreMGTStatus_To_v1alpha4_NnfLustreMGTStatus is an autogenerated conversion function. +func Convert_v1alpha3_NnfLustreMGTStatus_To_v1alpha4_NnfLustreMGTStatus(in *NnfLustreMGTStatus, out *v1alpha4.NnfLustreMGTStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfLustreMGTStatus_To_v1alpha4_NnfLustreMGTStatus(in, out, s) +} + +func autoConvert_v1alpha4_NnfLustreMGTStatus_To_v1alpha3_NnfLustreMGTStatus(in *v1alpha4.NnfLustreMGTStatus, out *NnfLustreMGTStatus, s conversion.Scope) error { + out.FsNameNext = in.FsNameNext + out.ClaimList = *(*[]NnfLustreMGTStatusClaim)(unsafe.Pointer(&in.ClaimList)) + out.ResourceError = in.ResourceError + return nil +} + +// Convert_v1alpha4_NnfLustreMGTStatus_To_v1alpha3_NnfLustreMGTStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfLustreMGTStatus_To_v1alpha3_NnfLustreMGTStatus(in *v1alpha4.NnfLustreMGTStatus, out *NnfLustreMGTStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfLustreMGTStatus_To_v1alpha3_NnfLustreMGTStatus(in, out, s) +} + +func autoConvert_v1alpha3_NnfLustreMGTStatusClaim_To_v1alpha4_NnfLustreMGTStatusClaim(in *NnfLustreMGTStatusClaim, out *v1alpha4.NnfLustreMGTStatusClaim, s conversion.Scope) error { + out.Reference = in.Reference + out.FsName = in.FsName + return nil +} + +// Convert_v1alpha3_NnfLustreMGTStatusClaim_To_v1alpha4_NnfLustreMGTStatusClaim is an autogenerated conversion function. +func Convert_v1alpha3_NnfLustreMGTStatusClaim_To_v1alpha4_NnfLustreMGTStatusClaim(in *NnfLustreMGTStatusClaim, out *v1alpha4.NnfLustreMGTStatusClaim, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfLustreMGTStatusClaim_To_v1alpha4_NnfLustreMGTStatusClaim(in, out, s) +} + +func autoConvert_v1alpha4_NnfLustreMGTStatusClaim_To_v1alpha3_NnfLustreMGTStatusClaim(in *v1alpha4.NnfLustreMGTStatusClaim, out *NnfLustreMGTStatusClaim, s conversion.Scope) error { + out.Reference = in.Reference + out.FsName = in.FsName + return nil +} + +// Convert_v1alpha4_NnfLustreMGTStatusClaim_To_v1alpha3_NnfLustreMGTStatusClaim is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfLustreMGTStatusClaim_To_v1alpha3_NnfLustreMGTStatusClaim(in *v1alpha4.NnfLustreMGTStatusClaim, out *NnfLustreMGTStatusClaim, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfLustreMGTStatusClaim_To_v1alpha3_NnfLustreMGTStatusClaim(in, out, s) +} + +func autoConvert_v1alpha3_NnfNode_To_v1alpha4_NnfNode(in *NnfNode, out *v1alpha4.NnfNode, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha3_NnfNodeSpec_To_v1alpha4_NnfNodeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha3_NnfNodeStatus_To_v1alpha4_NnfNodeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha3_NnfNode_To_v1alpha4_NnfNode is an autogenerated conversion function. +func Convert_v1alpha3_NnfNode_To_v1alpha4_NnfNode(in *NnfNode, out *v1alpha4.NnfNode, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfNode_To_v1alpha4_NnfNode(in, out, s) +} + +func autoConvert_v1alpha4_NnfNode_To_v1alpha3_NnfNode(in *v1alpha4.NnfNode, out *NnfNode, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha4_NnfNodeSpec_To_v1alpha3_NnfNodeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha4_NnfNodeStatus_To_v1alpha3_NnfNodeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha4_NnfNode_To_v1alpha3_NnfNode is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNode_To_v1alpha3_NnfNode(in *v1alpha4.NnfNode, out *NnfNode, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNode_To_v1alpha3_NnfNode(in, out, s) +} + +func autoConvert_v1alpha3_NnfNodeBlockStorage_To_v1alpha4_NnfNodeBlockStorage(in *NnfNodeBlockStorage, out *v1alpha4.NnfNodeBlockStorage, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha3_NnfNodeBlockStorageSpec_To_v1alpha4_NnfNodeBlockStorageSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha3_NnfNodeBlockStorageStatus_To_v1alpha4_NnfNodeBlockStorageStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha3_NnfNodeBlockStorage_To_v1alpha4_NnfNodeBlockStorage is an autogenerated conversion function. +func Convert_v1alpha3_NnfNodeBlockStorage_To_v1alpha4_NnfNodeBlockStorage(in *NnfNodeBlockStorage, out *v1alpha4.NnfNodeBlockStorage, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfNodeBlockStorage_To_v1alpha4_NnfNodeBlockStorage(in, out, s) +} + +func autoConvert_v1alpha4_NnfNodeBlockStorage_To_v1alpha3_NnfNodeBlockStorage(in *v1alpha4.NnfNodeBlockStorage, out *NnfNodeBlockStorage, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha4_NnfNodeBlockStorageSpec_To_v1alpha3_NnfNodeBlockStorageSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha4_NnfNodeBlockStorageStatus_To_v1alpha3_NnfNodeBlockStorageStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha4_NnfNodeBlockStorage_To_v1alpha3_NnfNodeBlockStorage is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeBlockStorage_To_v1alpha3_NnfNodeBlockStorage(in *v1alpha4.NnfNodeBlockStorage, out *NnfNodeBlockStorage, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeBlockStorage_To_v1alpha3_NnfNodeBlockStorage(in, out, s) +} + +func autoConvert_v1alpha3_NnfNodeBlockStorageAccessStatus_To_v1alpha4_NnfNodeBlockStorageAccessStatus(in *NnfNodeBlockStorageAccessStatus, out *v1alpha4.NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { + out.DevicePaths = *(*[]string)(unsafe.Pointer(&in.DevicePaths)) + out.StorageGroupId = in.StorageGroupId + return nil +} + +// Convert_v1alpha3_NnfNodeBlockStorageAccessStatus_To_v1alpha4_NnfNodeBlockStorageAccessStatus is an autogenerated conversion function. +func Convert_v1alpha3_NnfNodeBlockStorageAccessStatus_To_v1alpha4_NnfNodeBlockStorageAccessStatus(in *NnfNodeBlockStorageAccessStatus, out *v1alpha4.NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfNodeBlockStorageAccessStatus_To_v1alpha4_NnfNodeBlockStorageAccessStatus(in, out, s) +} + +func autoConvert_v1alpha4_NnfNodeBlockStorageAccessStatus_To_v1alpha3_NnfNodeBlockStorageAccessStatus(in *v1alpha4.NnfNodeBlockStorageAccessStatus, out *NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { + out.DevicePaths = *(*[]string)(unsafe.Pointer(&in.DevicePaths)) + out.StorageGroupId = in.StorageGroupId + return nil +} + +// Convert_v1alpha4_NnfNodeBlockStorageAccessStatus_To_v1alpha3_NnfNodeBlockStorageAccessStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeBlockStorageAccessStatus_To_v1alpha3_NnfNodeBlockStorageAccessStatus(in *v1alpha4.NnfNodeBlockStorageAccessStatus, out *NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeBlockStorageAccessStatus_To_v1alpha3_NnfNodeBlockStorageAccessStatus(in, out, s) +} + +func autoConvert_v1alpha3_NnfNodeBlockStorageAllocationSpec_To_v1alpha4_NnfNodeBlockStorageAllocationSpec(in *NnfNodeBlockStorageAllocationSpec, out *v1alpha4.NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { + out.Capacity = in.Capacity + out.Access = *(*[]string)(unsafe.Pointer(&in.Access)) + return nil +} + +// Convert_v1alpha3_NnfNodeBlockStorageAllocationSpec_To_v1alpha4_NnfNodeBlockStorageAllocationSpec is an autogenerated conversion function. +func Convert_v1alpha3_NnfNodeBlockStorageAllocationSpec_To_v1alpha4_NnfNodeBlockStorageAllocationSpec(in *NnfNodeBlockStorageAllocationSpec, out *v1alpha4.NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfNodeBlockStorageAllocationSpec_To_v1alpha4_NnfNodeBlockStorageAllocationSpec(in, out, s) +} + +func autoConvert_v1alpha4_NnfNodeBlockStorageAllocationSpec_To_v1alpha3_NnfNodeBlockStorageAllocationSpec(in *v1alpha4.NnfNodeBlockStorageAllocationSpec, out *NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { + out.Capacity = in.Capacity + out.Access = *(*[]string)(unsafe.Pointer(&in.Access)) + return nil +} + +// Convert_v1alpha4_NnfNodeBlockStorageAllocationSpec_To_v1alpha3_NnfNodeBlockStorageAllocationSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeBlockStorageAllocationSpec_To_v1alpha3_NnfNodeBlockStorageAllocationSpec(in *v1alpha4.NnfNodeBlockStorageAllocationSpec, out *NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeBlockStorageAllocationSpec_To_v1alpha3_NnfNodeBlockStorageAllocationSpec(in, out, s) +} + +func autoConvert_v1alpha3_NnfNodeBlockStorageAllocationStatus_To_v1alpha4_NnfNodeBlockStorageAllocationStatus(in *NnfNodeBlockStorageAllocationStatus, out *v1alpha4.NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { + out.Accesses = *(*map[string]v1alpha4.NnfNodeBlockStorageAccessStatus)(unsafe.Pointer(&in.Accesses)) + out.Devices = *(*[]v1alpha4.NnfNodeBlockStorageDeviceStatus)(unsafe.Pointer(&in.Devices)) + out.CapacityAllocated = in.CapacityAllocated + out.StoragePoolId = in.StoragePoolId + return nil +} + +// Convert_v1alpha3_NnfNodeBlockStorageAllocationStatus_To_v1alpha4_NnfNodeBlockStorageAllocationStatus is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfNodeBlockStorageAllocationStatus_To_v1alpha4_NnfNodeBlockStorageAllocationStatus(in *NnfNodeBlockStorageAllocationStatus, out *v1alpha4.NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfNodeBlockStorageAllocationStatus_To_v1alpha4_NnfNodeBlockStorageAllocationStatus(in, out, s) +} + +func autoConvert_v1alpha4_NnfNodeBlockStorageAllocationStatus_To_v1alpha3_NnfNodeBlockStorageAllocationStatus(in *v1alpha4.NnfNodeBlockStorageAllocationStatus, out *NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { + out.Accesses = *(*map[string]NnfNodeBlockStorageAccessStatus)(unsafe.Pointer(&in.Accesses)) + out.Devices = *(*[]NnfNodeBlockStorageDeviceStatus)(unsafe.Pointer(&in.Devices)) + out.CapacityAllocated = in.CapacityAllocated + out.StoragePoolId = in.StoragePoolId + return nil +} + +// Convert_v1alpha4_NnfNodeBlockStorageAllocationStatus_To_v1alpha3_NnfNodeBlockStorageAllocationStatus is an autogenerated conversion function. +func Convert_v1alpha4_NnfNodeBlockStorageAllocationStatus_To_v1alpha3_NnfNodeBlockStorageAllocationStatus(in *v1alpha4.NnfNodeBlockStorageAllocationStatus, out *NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeBlockStorageAllocationStatus_To_v1alpha3_NnfNodeBlockStorageAllocationStatus(in, out, s) +} + +func autoConvert_v1alpha3_NnfNodeBlockStorageDeviceStatus_To_v1alpha4_NnfNodeBlockStorageDeviceStatus(in *NnfNodeBlockStorageDeviceStatus, out *v1alpha4.NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { + out.NQN = in.NQN + out.NamespaceId = in.NamespaceId + out.CapacityAllocated = in.CapacityAllocated + return nil +} + +// Convert_v1alpha3_NnfNodeBlockStorageDeviceStatus_To_v1alpha4_NnfNodeBlockStorageDeviceStatus is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfNodeBlockStorageDeviceStatus_To_v1alpha4_NnfNodeBlockStorageDeviceStatus(in *NnfNodeBlockStorageDeviceStatus, out *v1alpha4.NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfNodeBlockStorageDeviceStatus_To_v1alpha4_NnfNodeBlockStorageDeviceStatus(in, out, s) +} + +func autoConvert_v1alpha4_NnfNodeBlockStorageDeviceStatus_To_v1alpha3_NnfNodeBlockStorageDeviceStatus(in *v1alpha4.NnfNodeBlockStorageDeviceStatus, out *NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { + out.NQN = in.NQN + out.NamespaceId = in.NamespaceId + out.CapacityAllocated = in.CapacityAllocated + return nil +} + +// Convert_v1alpha4_NnfNodeBlockStorageDeviceStatus_To_v1alpha3_NnfNodeBlockStorageDeviceStatus is an autogenerated conversion function. +func Convert_v1alpha4_NnfNodeBlockStorageDeviceStatus_To_v1alpha3_NnfNodeBlockStorageDeviceStatus(in *v1alpha4.NnfNodeBlockStorageDeviceStatus, out *NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeBlockStorageDeviceStatus_To_v1alpha3_NnfNodeBlockStorageDeviceStatus(in, out, s) +} + +func autoConvert_v1alpha3_NnfNodeBlockStorageList_To_v1alpha4_NnfNodeBlockStorageList(in *NnfNodeBlockStorageList, out *v1alpha4.NnfNodeBlockStorageList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha4.NnfNodeBlockStorage)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha3_NnfNodeBlockStorageList_To_v1alpha4_NnfNodeBlockStorageList is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfNodeBlockStorageList_To_v1alpha4_NnfNodeBlockStorageList(in *NnfNodeBlockStorageList, out *v1alpha4.NnfNodeBlockStorageList, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfNodeBlockStorageList_To_v1alpha4_NnfNodeBlockStorageList(in, out, s) +} + +func autoConvert_v1alpha4_NnfNodeBlockStorageList_To_v1alpha3_NnfNodeBlockStorageList(in *v1alpha4.NnfNodeBlockStorageList, out *NnfNodeBlockStorageList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NnfNodeBlockStorage)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha4_NnfNodeBlockStorageList_To_v1alpha3_NnfNodeBlockStorageList is an autogenerated conversion function. +func Convert_v1alpha4_NnfNodeBlockStorageList_To_v1alpha3_NnfNodeBlockStorageList(in *v1alpha4.NnfNodeBlockStorageList, out *NnfNodeBlockStorageList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeBlockStorageList_To_v1alpha3_NnfNodeBlockStorageList(in, out, s) +} + +func autoConvert_v1alpha3_NnfNodeBlockStorageSpec_To_v1alpha4_NnfNodeBlockStorageSpec(in *NnfNodeBlockStorageSpec, out *v1alpha4.NnfNodeBlockStorageSpec, s conversion.Scope) error { + out.SharedAllocation = in.SharedAllocation + out.Allocations = *(*[]v1alpha4.NnfNodeBlockStorageAllocationSpec)(unsafe.Pointer(&in.Allocations)) + return nil +} + +// Convert_v1alpha3_NnfNodeBlockStorageSpec_To_v1alpha4_NnfNodeBlockStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfNodeBlockStorageSpec_To_v1alpha4_NnfNodeBlockStorageSpec(in *NnfNodeBlockStorageSpec, out *v1alpha4.NnfNodeBlockStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfNodeBlockStorageSpec_To_v1alpha4_NnfNodeBlockStorageSpec(in, out, s) +} + +func autoConvert_v1alpha4_NnfNodeBlockStorageSpec_To_v1alpha3_NnfNodeBlockStorageSpec(in *v1alpha4.NnfNodeBlockStorageSpec, out *NnfNodeBlockStorageSpec, s conversion.Scope) error { + out.SharedAllocation = in.SharedAllocation + out.Allocations = *(*[]NnfNodeBlockStorageAllocationSpec)(unsafe.Pointer(&in.Allocations)) + return nil +} + +// Convert_v1alpha4_NnfNodeBlockStorageSpec_To_v1alpha3_NnfNodeBlockStorageSpec is an autogenerated conversion function. +func Convert_v1alpha4_NnfNodeBlockStorageSpec_To_v1alpha3_NnfNodeBlockStorageSpec(in *v1alpha4.NnfNodeBlockStorageSpec, out *NnfNodeBlockStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeBlockStorageSpec_To_v1alpha3_NnfNodeBlockStorageSpec(in, out, s) +} + +func autoConvert_v1alpha3_NnfNodeBlockStorageStatus_To_v1alpha4_NnfNodeBlockStorageStatus(in *NnfNodeBlockStorageStatus, out *v1alpha4.NnfNodeBlockStorageStatus, s conversion.Scope) error { + out.Allocations = *(*[]v1alpha4.NnfNodeBlockStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) + out.ResourceError = in.ResourceError + out.PodStartTime = in.PodStartTime + out.Ready = in.Ready + return nil +} + +// Convert_v1alpha3_NnfNodeBlockStorageStatus_To_v1alpha4_NnfNodeBlockStorageStatus is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfNodeBlockStorageStatus_To_v1alpha4_NnfNodeBlockStorageStatus(in *NnfNodeBlockStorageStatus, out *v1alpha4.NnfNodeBlockStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfNodeBlockStorageStatus_To_v1alpha4_NnfNodeBlockStorageStatus(in, out, s) +} + +func autoConvert_v1alpha4_NnfNodeBlockStorageStatus_To_v1alpha3_NnfNodeBlockStorageStatus(in *v1alpha4.NnfNodeBlockStorageStatus, out *NnfNodeBlockStorageStatus, s conversion.Scope) error { + out.Allocations = *(*[]NnfNodeBlockStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) + out.ResourceError = in.ResourceError + out.PodStartTime = in.PodStartTime + out.Ready = in.Ready + return nil +} + +// Convert_v1alpha4_NnfNodeBlockStorageStatus_To_v1alpha3_NnfNodeBlockStorageStatus is an autogenerated conversion function. +func Convert_v1alpha4_NnfNodeBlockStorageStatus_To_v1alpha3_NnfNodeBlockStorageStatus(in *v1alpha4.NnfNodeBlockStorageStatus, out *NnfNodeBlockStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeBlockStorageStatus_To_v1alpha3_NnfNodeBlockStorageStatus(in, out, s) +} + +func autoConvert_v1alpha3_NnfNodeECData_To_v1alpha4_NnfNodeECData(in *NnfNodeECData, out *v1alpha4.NnfNodeECData, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha3_NnfNodeECDataSpec_To_v1alpha4_NnfNodeECDataSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha3_NnfNodeECDataStatus_To_v1alpha4_NnfNodeECDataStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha3_NnfNodeECData_To_v1alpha4_NnfNodeECData is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfNodeECData_To_v1alpha4_NnfNodeECData(in *NnfNodeECData, out *v1alpha4.NnfNodeECData, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfNodeECData_To_v1alpha4_NnfNodeECData(in, out, s) +} + +func autoConvert_v1alpha4_NnfNodeECData_To_v1alpha3_NnfNodeECData(in *v1alpha4.NnfNodeECData, out *NnfNodeECData, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha4_NnfNodeECDataSpec_To_v1alpha3_NnfNodeECDataSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha4_NnfNodeECDataStatus_To_v1alpha3_NnfNodeECDataStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha4_NnfNodeECData_To_v1alpha3_NnfNodeECData is an autogenerated conversion function. +func Convert_v1alpha4_NnfNodeECData_To_v1alpha3_NnfNodeECData(in *v1alpha4.NnfNodeECData, out *NnfNodeECData, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeECData_To_v1alpha3_NnfNodeECData(in, out, s) +} + +func autoConvert_v1alpha3_NnfNodeECDataList_To_v1alpha4_NnfNodeECDataList(in *NnfNodeECDataList, out *v1alpha4.NnfNodeECDataList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha4.NnfNodeECData)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha3_NnfNodeECDataList_To_v1alpha4_NnfNodeECDataList is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfNodeECDataList_To_v1alpha4_NnfNodeECDataList(in *NnfNodeECDataList, out *v1alpha4.NnfNodeECDataList, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfNodeECDataList_To_v1alpha4_NnfNodeECDataList(in, out, s) +} + +func autoConvert_v1alpha4_NnfNodeECDataList_To_v1alpha3_NnfNodeECDataList(in *v1alpha4.NnfNodeECDataList, out *NnfNodeECDataList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NnfNodeECData)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha4_NnfNodeECDataList_To_v1alpha3_NnfNodeECDataList is an autogenerated conversion function. +func Convert_v1alpha4_NnfNodeECDataList_To_v1alpha3_NnfNodeECDataList(in *v1alpha4.NnfNodeECDataList, out *NnfNodeECDataList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeECDataList_To_v1alpha3_NnfNodeECDataList(in, out, s) +} + +func autoConvert_v1alpha3_NnfNodeECDataSpec_To_v1alpha4_NnfNodeECDataSpec(in *NnfNodeECDataSpec, out *v1alpha4.NnfNodeECDataSpec, s conversion.Scope) error { + return nil +} + +// Convert_v1alpha3_NnfNodeECDataSpec_To_v1alpha4_NnfNodeECDataSpec is an autogenerated conversion function. +func Convert_v1alpha3_NnfNodeECDataSpec_To_v1alpha4_NnfNodeECDataSpec(in *NnfNodeECDataSpec, out *v1alpha4.NnfNodeECDataSpec, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfNodeECDataSpec_To_v1alpha4_NnfNodeECDataSpec(in, out, s) +} + +func autoConvert_v1alpha4_NnfNodeECDataSpec_To_v1alpha3_NnfNodeECDataSpec(in *v1alpha4.NnfNodeECDataSpec, out *NnfNodeECDataSpec, s conversion.Scope) error { + return nil +} + +// Convert_v1alpha4_NnfNodeECDataSpec_To_v1alpha3_NnfNodeECDataSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeECDataSpec_To_v1alpha3_NnfNodeECDataSpec(in *v1alpha4.NnfNodeECDataSpec, out *NnfNodeECDataSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeECDataSpec_To_v1alpha3_NnfNodeECDataSpec(in, out, s) +} + +func autoConvert_v1alpha3_NnfNodeECDataStatus_To_v1alpha4_NnfNodeECDataStatus(in *NnfNodeECDataStatus, out *v1alpha4.NnfNodeECDataStatus, s conversion.Scope) error { + out.Data = *(*map[string]v1alpha4.NnfNodeECPrivateData)(unsafe.Pointer(&in.Data)) + return nil +} + +// Convert_v1alpha3_NnfNodeECDataStatus_To_v1alpha4_NnfNodeECDataStatus is an autogenerated conversion function. +func Convert_v1alpha3_NnfNodeECDataStatus_To_v1alpha4_NnfNodeECDataStatus(in *NnfNodeECDataStatus, out *v1alpha4.NnfNodeECDataStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfNodeECDataStatus_To_v1alpha4_NnfNodeECDataStatus(in, out, s) +} + +func autoConvert_v1alpha4_NnfNodeECDataStatus_To_v1alpha3_NnfNodeECDataStatus(in *v1alpha4.NnfNodeECDataStatus, out *NnfNodeECDataStatus, s conversion.Scope) error { + out.Data = *(*map[string]NnfNodeECPrivateData)(unsafe.Pointer(&in.Data)) + return nil +} + +// Convert_v1alpha4_NnfNodeECDataStatus_To_v1alpha3_NnfNodeECDataStatus is an autogenerated conversion function. +func Convert_v1alpha4_NnfNodeECDataStatus_To_v1alpha3_NnfNodeECDataStatus(in *v1alpha4.NnfNodeECDataStatus, out *NnfNodeECDataStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeECDataStatus_To_v1alpha3_NnfNodeECDataStatus(in, out, s) +} + +func autoConvert_v1alpha3_NnfNodeList_To_v1alpha4_NnfNodeList(in *NnfNodeList, out *v1alpha4.NnfNodeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha4.NnfNode)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha3_NnfNodeList_To_v1alpha4_NnfNodeList is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfNodeList_To_v1alpha4_NnfNodeList(in *NnfNodeList, out *v1alpha4.NnfNodeList, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfNodeList_To_v1alpha4_NnfNodeList(in, out, s) +} + +func autoConvert_v1alpha4_NnfNodeList_To_v1alpha3_NnfNodeList(in *v1alpha4.NnfNodeList, out *NnfNodeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NnfNode)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha4_NnfNodeList_To_v1alpha3_NnfNodeList is an autogenerated conversion function. +func Convert_v1alpha4_NnfNodeList_To_v1alpha3_NnfNodeList(in *v1alpha4.NnfNodeList, out *NnfNodeList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeList_To_v1alpha3_NnfNodeList(in, out, s) +} + +func autoConvert_v1alpha3_NnfNodeSpec_To_v1alpha4_NnfNodeSpec(in *NnfNodeSpec, out *v1alpha4.NnfNodeSpec, s conversion.Scope) error { + out.Name = in.Name + out.Pod = in.Pod + out.State = v1alpha4.NnfResourceStateType(in.State) + return nil +} + +// Convert_v1alpha3_NnfNodeSpec_To_v1alpha4_NnfNodeSpec is an autogenerated conversion function. +func Convert_v1alpha3_NnfNodeSpec_To_v1alpha4_NnfNodeSpec(in *NnfNodeSpec, out *v1alpha4.NnfNodeSpec, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfNodeSpec_To_v1alpha4_NnfNodeSpec(in, out, s) +} + +func autoConvert_v1alpha4_NnfNodeSpec_To_v1alpha3_NnfNodeSpec(in *v1alpha4.NnfNodeSpec, out *NnfNodeSpec, s conversion.Scope) error { + out.Name = in.Name + out.Pod = in.Pod + out.State = NnfResourceStateType(in.State) + return nil +} + +// Convert_v1alpha4_NnfNodeSpec_To_v1alpha3_NnfNodeSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeSpec_To_v1alpha3_NnfNodeSpec(in *v1alpha4.NnfNodeSpec, out *NnfNodeSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeSpec_To_v1alpha3_NnfNodeSpec(in, out, s) +} + +func autoConvert_v1alpha3_NnfNodeStatus_To_v1alpha4_NnfNodeStatus(in *NnfNodeStatus, out *v1alpha4.NnfNodeStatus, s conversion.Scope) error { + out.Status = v1alpha4.NnfResourceStatusType(in.Status) + out.Health = v1alpha4.NnfResourceHealthType(in.Health) + out.Fenced = in.Fenced + out.LNetNid = in.LNetNid + out.Capacity = in.Capacity + out.CapacityAllocated = in.CapacityAllocated + out.Servers = *(*[]v1alpha4.NnfServerStatus)(unsafe.Pointer(&in.Servers)) + out.Drives = *(*[]v1alpha4.NnfDriveStatus)(unsafe.Pointer(&in.Drives)) + return nil +} + +// Convert_v1alpha3_NnfNodeStatus_To_v1alpha4_NnfNodeStatus is an autogenerated conversion function. +func Convert_v1alpha3_NnfNodeStatus_To_v1alpha4_NnfNodeStatus(in *NnfNodeStatus, out *v1alpha4.NnfNodeStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfNodeStatus_To_v1alpha4_NnfNodeStatus(in, out, s) +} + +func autoConvert_v1alpha4_NnfNodeStatus_To_v1alpha3_NnfNodeStatus(in *v1alpha4.NnfNodeStatus, out *NnfNodeStatus, s conversion.Scope) error { + out.Status = NnfResourceStatusType(in.Status) + out.Health = NnfResourceHealthType(in.Health) + out.Fenced = in.Fenced + out.LNetNid = in.LNetNid + out.Capacity = in.Capacity + out.CapacityAllocated = in.CapacityAllocated + out.Servers = *(*[]NnfServerStatus)(unsafe.Pointer(&in.Servers)) + out.Drives = *(*[]NnfDriveStatus)(unsafe.Pointer(&in.Drives)) + return nil +} + +// Convert_v1alpha4_NnfNodeStatus_To_v1alpha3_NnfNodeStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeStatus_To_v1alpha3_NnfNodeStatus(in *v1alpha4.NnfNodeStatus, out *NnfNodeStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeStatus_To_v1alpha3_NnfNodeStatus(in, out, s) +} + +func autoConvert_v1alpha3_NnfNodeStorage_To_v1alpha4_NnfNodeStorage(in *NnfNodeStorage, out *v1alpha4.NnfNodeStorage, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha3_NnfNodeStorageSpec_To_v1alpha4_NnfNodeStorageSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha3_NnfNodeStorageStatus_To_v1alpha4_NnfNodeStorageStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha3_NnfNodeStorage_To_v1alpha4_NnfNodeStorage is an autogenerated conversion function. +func Convert_v1alpha3_NnfNodeStorage_To_v1alpha4_NnfNodeStorage(in *NnfNodeStorage, out *v1alpha4.NnfNodeStorage, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfNodeStorage_To_v1alpha4_NnfNodeStorage(in, out, s) +} + +func autoConvert_v1alpha4_NnfNodeStorage_To_v1alpha3_NnfNodeStorage(in *v1alpha4.NnfNodeStorage, out *NnfNodeStorage, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha4_NnfNodeStorageSpec_To_v1alpha3_NnfNodeStorageSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha4_NnfNodeStorageStatus_To_v1alpha3_NnfNodeStorageStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha4_NnfNodeStorage_To_v1alpha3_NnfNodeStorage is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeStorage_To_v1alpha3_NnfNodeStorage(in *v1alpha4.NnfNodeStorage, out *NnfNodeStorage, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeStorage_To_v1alpha3_NnfNodeStorage(in, out, s) +} + +func autoConvert_v1alpha3_NnfNodeStorageAllocationStatus_To_v1alpha4_NnfNodeStorageAllocationStatus(in *NnfNodeStorageAllocationStatus, out *v1alpha4.NnfNodeStorageAllocationStatus, s conversion.Scope) error { + out.VolumeGroup = in.VolumeGroup + out.LogicalVolume = in.LogicalVolume + out.Ready = in.Ready + return nil +} + +// Convert_v1alpha3_NnfNodeStorageAllocationStatus_To_v1alpha4_NnfNodeStorageAllocationStatus is an autogenerated conversion function. +func Convert_v1alpha3_NnfNodeStorageAllocationStatus_To_v1alpha4_NnfNodeStorageAllocationStatus(in *NnfNodeStorageAllocationStatus, out *v1alpha4.NnfNodeStorageAllocationStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfNodeStorageAllocationStatus_To_v1alpha4_NnfNodeStorageAllocationStatus(in, out, s) +} + +func autoConvert_v1alpha4_NnfNodeStorageAllocationStatus_To_v1alpha3_NnfNodeStorageAllocationStatus(in *v1alpha4.NnfNodeStorageAllocationStatus, out *NnfNodeStorageAllocationStatus, s conversion.Scope) error { + out.VolumeGroup = in.VolumeGroup + out.LogicalVolume = in.LogicalVolume + out.Ready = in.Ready + return nil +} + +// Convert_v1alpha4_NnfNodeStorageAllocationStatus_To_v1alpha3_NnfNodeStorageAllocationStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeStorageAllocationStatus_To_v1alpha3_NnfNodeStorageAllocationStatus(in *v1alpha4.NnfNodeStorageAllocationStatus, out *NnfNodeStorageAllocationStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeStorageAllocationStatus_To_v1alpha3_NnfNodeStorageAllocationStatus(in, out, s) +} + +func autoConvert_v1alpha3_NnfNodeStorageList_To_v1alpha4_NnfNodeStorageList(in *NnfNodeStorageList, out *v1alpha4.NnfNodeStorageList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha4.NnfNodeStorage)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha3_NnfNodeStorageList_To_v1alpha4_NnfNodeStorageList is an autogenerated conversion function. +func Convert_v1alpha3_NnfNodeStorageList_To_v1alpha4_NnfNodeStorageList(in *NnfNodeStorageList, out *v1alpha4.NnfNodeStorageList, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfNodeStorageList_To_v1alpha4_NnfNodeStorageList(in, out, s) +} + +func autoConvert_v1alpha4_NnfNodeStorageList_To_v1alpha3_NnfNodeStorageList(in *v1alpha4.NnfNodeStorageList, out *NnfNodeStorageList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NnfNodeStorage)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha4_NnfNodeStorageList_To_v1alpha3_NnfNodeStorageList is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeStorageList_To_v1alpha3_NnfNodeStorageList(in *v1alpha4.NnfNodeStorageList, out *NnfNodeStorageList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeStorageList_To_v1alpha3_NnfNodeStorageList(in, out, s) +} + +func autoConvert_v1alpha3_NnfNodeStorageSpec_To_v1alpha4_NnfNodeStorageSpec(in *NnfNodeStorageSpec, out *v1alpha4.NnfNodeStorageSpec, s conversion.Scope) error { + out.Count = in.Count + out.SharedAllocation = in.SharedAllocation + out.Capacity = in.Capacity + out.UserID = in.UserID + out.GroupID = in.GroupID + out.FileSystemType = in.FileSystemType + if err := Convert_v1alpha3_LustreStorageSpec_To_v1alpha4_LustreStorageSpec(&in.LustreStorage, &out.LustreStorage, s); err != nil { + return err + } + out.BlockReference = in.BlockReference + return nil +} + +// Convert_v1alpha3_NnfNodeStorageSpec_To_v1alpha4_NnfNodeStorageSpec is an autogenerated conversion function. +func Convert_v1alpha3_NnfNodeStorageSpec_To_v1alpha4_NnfNodeStorageSpec(in *NnfNodeStorageSpec, out *v1alpha4.NnfNodeStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfNodeStorageSpec_To_v1alpha4_NnfNodeStorageSpec(in, out, s) +} + +func autoConvert_v1alpha4_NnfNodeStorageSpec_To_v1alpha3_NnfNodeStorageSpec(in *v1alpha4.NnfNodeStorageSpec, out *NnfNodeStorageSpec, s conversion.Scope) error { + out.Count = in.Count + out.SharedAllocation = in.SharedAllocation + out.Capacity = in.Capacity + out.UserID = in.UserID + out.GroupID = in.GroupID + out.FileSystemType = in.FileSystemType + if err := Convert_v1alpha4_LustreStorageSpec_To_v1alpha3_LustreStorageSpec(&in.LustreStorage, &out.LustreStorage, s); err != nil { + return err + } + out.BlockReference = in.BlockReference + return nil +} + +// Convert_v1alpha4_NnfNodeStorageSpec_To_v1alpha3_NnfNodeStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeStorageSpec_To_v1alpha3_NnfNodeStorageSpec(in *v1alpha4.NnfNodeStorageSpec, out *NnfNodeStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeStorageSpec_To_v1alpha3_NnfNodeStorageSpec(in, out, s) +} + +func autoConvert_v1alpha3_NnfNodeStorageStatus_To_v1alpha4_NnfNodeStorageStatus(in *NnfNodeStorageStatus, out *v1alpha4.NnfNodeStorageStatus, s conversion.Scope) error { + out.Allocations = *(*[]v1alpha4.NnfNodeStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) + out.Ready = in.Ready + out.ResourceError = in.ResourceError + return nil +} + +// Convert_v1alpha3_NnfNodeStorageStatus_To_v1alpha4_NnfNodeStorageStatus is an autogenerated conversion function. +func Convert_v1alpha3_NnfNodeStorageStatus_To_v1alpha4_NnfNodeStorageStatus(in *NnfNodeStorageStatus, out *v1alpha4.NnfNodeStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfNodeStorageStatus_To_v1alpha4_NnfNodeStorageStatus(in, out, s) +} + +func autoConvert_v1alpha4_NnfNodeStorageStatus_To_v1alpha3_NnfNodeStorageStatus(in *v1alpha4.NnfNodeStorageStatus, out *NnfNodeStorageStatus, s conversion.Scope) error { + out.Allocations = *(*[]NnfNodeStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) + out.Ready = in.Ready + out.ResourceError = in.ResourceError + return nil +} + +// Convert_v1alpha4_NnfNodeStorageStatus_To_v1alpha3_NnfNodeStorageStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfNodeStorageStatus_To_v1alpha3_NnfNodeStorageStatus(in *v1alpha4.NnfNodeStorageStatus, out *NnfNodeStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfNodeStorageStatus_To_v1alpha3_NnfNodeStorageStatus(in, out, s) +} + +func autoConvert_v1alpha3_NnfPortManager_To_v1alpha4_NnfPortManager(in *NnfPortManager, out *v1alpha4.NnfPortManager, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha3_NnfPortManagerSpec_To_v1alpha4_NnfPortManagerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha3_NnfPortManagerStatus_To_v1alpha4_NnfPortManagerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha3_NnfPortManager_To_v1alpha4_NnfPortManager is an autogenerated conversion function. +func Convert_v1alpha3_NnfPortManager_To_v1alpha4_NnfPortManager(in *NnfPortManager, out *v1alpha4.NnfPortManager, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfPortManager_To_v1alpha4_NnfPortManager(in, out, s) +} + +func autoConvert_v1alpha4_NnfPortManager_To_v1alpha3_NnfPortManager(in *v1alpha4.NnfPortManager, out *NnfPortManager, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha4_NnfPortManagerSpec_To_v1alpha3_NnfPortManagerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha4_NnfPortManagerStatus_To_v1alpha3_NnfPortManagerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha4_NnfPortManager_To_v1alpha3_NnfPortManager is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfPortManager_To_v1alpha3_NnfPortManager(in *v1alpha4.NnfPortManager, out *NnfPortManager, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfPortManager_To_v1alpha3_NnfPortManager(in, out, s) +} + +func autoConvert_v1alpha3_NnfPortManagerAllocationSpec_To_v1alpha4_NnfPortManagerAllocationSpec(in *NnfPortManagerAllocationSpec, out *v1alpha4.NnfPortManagerAllocationSpec, s conversion.Scope) error { + out.Requester = in.Requester + out.Count = in.Count + return nil +} + +// Convert_v1alpha3_NnfPortManagerAllocationSpec_To_v1alpha4_NnfPortManagerAllocationSpec is an autogenerated conversion function. +func Convert_v1alpha3_NnfPortManagerAllocationSpec_To_v1alpha4_NnfPortManagerAllocationSpec(in *NnfPortManagerAllocationSpec, out *v1alpha4.NnfPortManagerAllocationSpec, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfPortManagerAllocationSpec_To_v1alpha4_NnfPortManagerAllocationSpec(in, out, s) +} + +func autoConvert_v1alpha4_NnfPortManagerAllocationSpec_To_v1alpha3_NnfPortManagerAllocationSpec(in *v1alpha4.NnfPortManagerAllocationSpec, out *NnfPortManagerAllocationSpec, s conversion.Scope) error { + out.Requester = in.Requester + out.Count = in.Count + return nil +} + +// Convert_v1alpha4_NnfPortManagerAllocationSpec_To_v1alpha3_NnfPortManagerAllocationSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfPortManagerAllocationSpec_To_v1alpha3_NnfPortManagerAllocationSpec(in *v1alpha4.NnfPortManagerAllocationSpec, out *NnfPortManagerAllocationSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfPortManagerAllocationSpec_To_v1alpha3_NnfPortManagerAllocationSpec(in, out, s) +} + +func autoConvert_v1alpha3_NnfPortManagerAllocationStatus_To_v1alpha4_NnfPortManagerAllocationStatus(in *NnfPortManagerAllocationStatus, out *v1alpha4.NnfPortManagerAllocationStatus, s conversion.Scope) error { + out.Requester = (*v1.ObjectReference)(unsafe.Pointer(in.Requester)) + out.Ports = *(*[]uint16)(unsafe.Pointer(&in.Ports)) + out.Status = v1alpha4.NnfPortManagerAllocationStatusStatus(in.Status) + out.TimeUnallocated = (*metav1.Time)(unsafe.Pointer(in.TimeUnallocated)) + return nil +} + +// Convert_v1alpha3_NnfPortManagerAllocationStatus_To_v1alpha4_NnfPortManagerAllocationStatus is an autogenerated conversion function. +func Convert_v1alpha3_NnfPortManagerAllocationStatus_To_v1alpha4_NnfPortManagerAllocationStatus(in *NnfPortManagerAllocationStatus, out *v1alpha4.NnfPortManagerAllocationStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfPortManagerAllocationStatus_To_v1alpha4_NnfPortManagerAllocationStatus(in, out, s) +} + +func autoConvert_v1alpha4_NnfPortManagerAllocationStatus_To_v1alpha3_NnfPortManagerAllocationStatus(in *v1alpha4.NnfPortManagerAllocationStatus, out *NnfPortManagerAllocationStatus, s conversion.Scope) error { + out.Requester = (*v1.ObjectReference)(unsafe.Pointer(in.Requester)) + out.Ports = *(*[]uint16)(unsafe.Pointer(&in.Ports)) + out.Status = NnfPortManagerAllocationStatusStatus(in.Status) + out.TimeUnallocated = (*metav1.Time)(unsafe.Pointer(in.TimeUnallocated)) + return nil +} + +// Convert_v1alpha4_NnfPortManagerAllocationStatus_To_v1alpha3_NnfPortManagerAllocationStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfPortManagerAllocationStatus_To_v1alpha3_NnfPortManagerAllocationStatus(in *v1alpha4.NnfPortManagerAllocationStatus, out *NnfPortManagerAllocationStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfPortManagerAllocationStatus_To_v1alpha3_NnfPortManagerAllocationStatus(in, out, s) +} + +func autoConvert_v1alpha3_NnfPortManagerList_To_v1alpha4_NnfPortManagerList(in *NnfPortManagerList, out *v1alpha4.NnfPortManagerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha4.NnfPortManager)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha3_NnfPortManagerList_To_v1alpha4_NnfPortManagerList is an autogenerated conversion function. +func Convert_v1alpha3_NnfPortManagerList_To_v1alpha4_NnfPortManagerList(in *NnfPortManagerList, out *v1alpha4.NnfPortManagerList, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfPortManagerList_To_v1alpha4_NnfPortManagerList(in, out, s) +} + +func autoConvert_v1alpha4_NnfPortManagerList_To_v1alpha3_NnfPortManagerList(in *v1alpha4.NnfPortManagerList, out *NnfPortManagerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NnfPortManager)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha4_NnfPortManagerList_To_v1alpha3_NnfPortManagerList is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfPortManagerList_To_v1alpha3_NnfPortManagerList(in *v1alpha4.NnfPortManagerList, out *NnfPortManagerList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfPortManagerList_To_v1alpha3_NnfPortManagerList(in, out, s) +} + +func autoConvert_v1alpha3_NnfPortManagerSpec_To_v1alpha4_NnfPortManagerSpec(in *NnfPortManagerSpec, out *v1alpha4.NnfPortManagerSpec, s conversion.Scope) error { + out.SystemConfiguration = in.SystemConfiguration + out.Allocations = *(*[]v1alpha4.NnfPortManagerAllocationSpec)(unsafe.Pointer(&in.Allocations)) + return nil +} + +// Convert_v1alpha3_NnfPortManagerSpec_To_v1alpha4_NnfPortManagerSpec is an autogenerated conversion function. +func Convert_v1alpha3_NnfPortManagerSpec_To_v1alpha4_NnfPortManagerSpec(in *NnfPortManagerSpec, out *v1alpha4.NnfPortManagerSpec, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfPortManagerSpec_To_v1alpha4_NnfPortManagerSpec(in, out, s) +} + +func autoConvert_v1alpha4_NnfPortManagerSpec_To_v1alpha3_NnfPortManagerSpec(in *v1alpha4.NnfPortManagerSpec, out *NnfPortManagerSpec, s conversion.Scope) error { + out.SystemConfiguration = in.SystemConfiguration + out.Allocations = *(*[]NnfPortManagerAllocationSpec)(unsafe.Pointer(&in.Allocations)) + return nil +} + +// Convert_v1alpha4_NnfPortManagerSpec_To_v1alpha3_NnfPortManagerSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfPortManagerSpec_To_v1alpha3_NnfPortManagerSpec(in *v1alpha4.NnfPortManagerSpec, out *NnfPortManagerSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfPortManagerSpec_To_v1alpha3_NnfPortManagerSpec(in, out, s) +} + +func autoConvert_v1alpha3_NnfPortManagerStatus_To_v1alpha4_NnfPortManagerStatus(in *NnfPortManagerStatus, out *v1alpha4.NnfPortManagerStatus, s conversion.Scope) error { + out.Allocations = *(*[]v1alpha4.NnfPortManagerAllocationStatus)(unsafe.Pointer(&in.Allocations)) + out.Status = v1alpha4.NnfPortManagerStatusStatus(in.Status) + return nil +} + +// Convert_v1alpha3_NnfPortManagerStatus_To_v1alpha4_NnfPortManagerStatus is an autogenerated conversion function. +func Convert_v1alpha3_NnfPortManagerStatus_To_v1alpha4_NnfPortManagerStatus(in *NnfPortManagerStatus, out *v1alpha4.NnfPortManagerStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfPortManagerStatus_To_v1alpha4_NnfPortManagerStatus(in, out, s) +} + +func autoConvert_v1alpha4_NnfPortManagerStatus_To_v1alpha3_NnfPortManagerStatus(in *v1alpha4.NnfPortManagerStatus, out *NnfPortManagerStatus, s conversion.Scope) error { + out.Allocations = *(*[]NnfPortManagerAllocationStatus)(unsafe.Pointer(&in.Allocations)) + out.Status = NnfPortManagerStatusStatus(in.Status) + return nil +} + +// Convert_v1alpha4_NnfPortManagerStatus_To_v1alpha3_NnfPortManagerStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfPortManagerStatus_To_v1alpha3_NnfPortManagerStatus(in *v1alpha4.NnfPortManagerStatus, out *NnfPortManagerStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfPortManagerStatus_To_v1alpha3_NnfPortManagerStatus(in, out, s) +} + +func autoConvert_v1alpha3_NnfResourceStatus_To_v1alpha4_NnfResourceStatus(in *NnfResourceStatus, out *v1alpha4.NnfResourceStatus, s conversion.Scope) error { + out.ID = in.ID + out.Name = in.Name + out.Status = v1alpha4.NnfResourceStatusType(in.Status) + out.Health = v1alpha4.NnfResourceHealthType(in.Health) + return nil +} + +// Convert_v1alpha3_NnfResourceStatus_To_v1alpha4_NnfResourceStatus is an autogenerated conversion function. +func Convert_v1alpha3_NnfResourceStatus_To_v1alpha4_NnfResourceStatus(in *NnfResourceStatus, out *v1alpha4.NnfResourceStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfResourceStatus_To_v1alpha4_NnfResourceStatus(in, out, s) +} + +func autoConvert_v1alpha4_NnfResourceStatus_To_v1alpha3_NnfResourceStatus(in *v1alpha4.NnfResourceStatus, out *NnfResourceStatus, s conversion.Scope) error { + out.ID = in.ID + out.Name = in.Name + out.Status = NnfResourceStatusType(in.Status) + out.Health = NnfResourceHealthType(in.Health) + return nil +} + +// Convert_v1alpha4_NnfResourceStatus_To_v1alpha3_NnfResourceStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfResourceStatus_To_v1alpha3_NnfResourceStatus(in *v1alpha4.NnfResourceStatus, out *NnfResourceStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfResourceStatus_To_v1alpha3_NnfResourceStatus(in, out, s) +} + +func autoConvert_v1alpha3_NnfServerStatus_To_v1alpha4_NnfServerStatus(in *NnfServerStatus, out *v1alpha4.NnfServerStatus, s conversion.Scope) error { + out.Hostname = in.Hostname + if err := Convert_v1alpha3_NnfResourceStatus_To_v1alpha4_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha3_NnfServerStatus_To_v1alpha4_NnfServerStatus is an autogenerated conversion function. +func Convert_v1alpha3_NnfServerStatus_To_v1alpha4_NnfServerStatus(in *NnfServerStatus, out *v1alpha4.NnfServerStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfServerStatus_To_v1alpha4_NnfServerStatus(in, out, s) +} + +func autoConvert_v1alpha4_NnfServerStatus_To_v1alpha3_NnfServerStatus(in *v1alpha4.NnfServerStatus, out *NnfServerStatus, s conversion.Scope) error { + out.Hostname = in.Hostname + if err := Convert_v1alpha4_NnfResourceStatus_To_v1alpha3_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha4_NnfServerStatus_To_v1alpha3_NnfServerStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfServerStatus_To_v1alpha3_NnfServerStatus(in *v1alpha4.NnfServerStatus, out *NnfServerStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfServerStatus_To_v1alpha3_NnfServerStatus(in, out, s) +} + +func autoConvert_v1alpha3_NnfStorage_To_v1alpha4_NnfStorage(in *NnfStorage, out *v1alpha4.NnfStorage, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha3_NnfStorageSpec_To_v1alpha4_NnfStorageSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha3_NnfStorageStatus_To_v1alpha4_NnfStorageStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha3_NnfStorage_To_v1alpha4_NnfStorage is an autogenerated conversion function. +func Convert_v1alpha3_NnfStorage_To_v1alpha4_NnfStorage(in *NnfStorage, out *v1alpha4.NnfStorage, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfStorage_To_v1alpha4_NnfStorage(in, out, s) +} + +func autoConvert_v1alpha4_NnfStorage_To_v1alpha3_NnfStorage(in *v1alpha4.NnfStorage, out *NnfStorage, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha4_NnfStorageSpec_To_v1alpha3_NnfStorageSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha4_NnfStorageStatus_To_v1alpha3_NnfStorageStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha4_NnfStorage_To_v1alpha3_NnfStorage is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorage_To_v1alpha3_NnfStorage(in *v1alpha4.NnfStorage, out *NnfStorage, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorage_To_v1alpha3_NnfStorage(in, out, s) +} + +func autoConvert_v1alpha3_NnfStorageAllocationNodes_To_v1alpha4_NnfStorageAllocationNodes(in *NnfStorageAllocationNodes, out *v1alpha4.NnfStorageAllocationNodes, s conversion.Scope) error { + out.Name = in.Name + out.Count = in.Count + return nil +} + +// Convert_v1alpha3_NnfStorageAllocationNodes_To_v1alpha4_NnfStorageAllocationNodes is an autogenerated conversion function. +func Convert_v1alpha3_NnfStorageAllocationNodes_To_v1alpha4_NnfStorageAllocationNodes(in *NnfStorageAllocationNodes, out *v1alpha4.NnfStorageAllocationNodes, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfStorageAllocationNodes_To_v1alpha4_NnfStorageAllocationNodes(in, out, s) +} + +func autoConvert_v1alpha4_NnfStorageAllocationNodes_To_v1alpha3_NnfStorageAllocationNodes(in *v1alpha4.NnfStorageAllocationNodes, out *NnfStorageAllocationNodes, s conversion.Scope) error { + out.Name = in.Name + out.Count = in.Count + return nil +} + +// Convert_v1alpha4_NnfStorageAllocationNodes_To_v1alpha3_NnfStorageAllocationNodes is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageAllocationNodes_To_v1alpha3_NnfStorageAllocationNodes(in *v1alpha4.NnfStorageAllocationNodes, out *NnfStorageAllocationNodes, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageAllocationNodes_To_v1alpha3_NnfStorageAllocationNodes(in, out, s) +} + +func autoConvert_v1alpha3_NnfStorageAllocationSetSpec_To_v1alpha4_NnfStorageAllocationSetSpec(in *NnfStorageAllocationSetSpec, out *v1alpha4.NnfStorageAllocationSetSpec, s conversion.Scope) error { + out.Name = in.Name + out.Capacity = in.Capacity + if err := Convert_v1alpha3_NnfStorageLustreSpec_To_v1alpha4_NnfStorageLustreSpec(&in.NnfStorageLustreSpec, &out.NnfStorageLustreSpec, s); err != nil { + return err + } + out.SharedAllocation = in.SharedAllocation + out.Nodes = *(*[]v1alpha4.NnfStorageAllocationNodes)(unsafe.Pointer(&in.Nodes)) + return nil +} + +// Convert_v1alpha3_NnfStorageAllocationSetSpec_To_v1alpha4_NnfStorageAllocationSetSpec is an autogenerated conversion function. +func Convert_v1alpha3_NnfStorageAllocationSetSpec_To_v1alpha4_NnfStorageAllocationSetSpec(in *NnfStorageAllocationSetSpec, out *v1alpha4.NnfStorageAllocationSetSpec, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfStorageAllocationSetSpec_To_v1alpha4_NnfStorageAllocationSetSpec(in, out, s) +} + +func autoConvert_v1alpha4_NnfStorageAllocationSetSpec_To_v1alpha3_NnfStorageAllocationSetSpec(in *v1alpha4.NnfStorageAllocationSetSpec, out *NnfStorageAllocationSetSpec, s conversion.Scope) error { + out.Name = in.Name + out.Capacity = in.Capacity + if err := Convert_v1alpha4_NnfStorageLustreSpec_To_v1alpha3_NnfStorageLustreSpec(&in.NnfStorageLustreSpec, &out.NnfStorageLustreSpec, s); err != nil { + return err + } + out.SharedAllocation = in.SharedAllocation + out.Nodes = *(*[]NnfStorageAllocationNodes)(unsafe.Pointer(&in.Nodes)) + return nil +} + +// Convert_v1alpha4_NnfStorageAllocationSetSpec_To_v1alpha3_NnfStorageAllocationSetSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageAllocationSetSpec_To_v1alpha3_NnfStorageAllocationSetSpec(in *v1alpha4.NnfStorageAllocationSetSpec, out *NnfStorageAllocationSetSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageAllocationSetSpec_To_v1alpha3_NnfStorageAllocationSetSpec(in, out, s) +} + +func autoConvert_v1alpha3_NnfStorageAllocationSetStatus_To_v1alpha4_NnfStorageAllocationSetStatus(in *NnfStorageAllocationSetStatus, out *v1alpha4.NnfStorageAllocationSetStatus, s conversion.Scope) error { + out.Ready = in.Ready + out.AllocationCount = in.AllocationCount + return nil +} + +// Convert_v1alpha3_NnfStorageAllocationSetStatus_To_v1alpha4_NnfStorageAllocationSetStatus is an autogenerated conversion function. +func Convert_v1alpha3_NnfStorageAllocationSetStatus_To_v1alpha4_NnfStorageAllocationSetStatus(in *NnfStorageAllocationSetStatus, out *v1alpha4.NnfStorageAllocationSetStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfStorageAllocationSetStatus_To_v1alpha4_NnfStorageAllocationSetStatus(in, out, s) +} + +func autoConvert_v1alpha4_NnfStorageAllocationSetStatus_To_v1alpha3_NnfStorageAllocationSetStatus(in *v1alpha4.NnfStorageAllocationSetStatus, out *NnfStorageAllocationSetStatus, s conversion.Scope) error { + out.Ready = in.Ready + out.AllocationCount = in.AllocationCount + return nil +} + +// Convert_v1alpha4_NnfStorageAllocationSetStatus_To_v1alpha3_NnfStorageAllocationSetStatus is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageAllocationSetStatus_To_v1alpha3_NnfStorageAllocationSetStatus(in *v1alpha4.NnfStorageAllocationSetStatus, out *NnfStorageAllocationSetStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageAllocationSetStatus_To_v1alpha3_NnfStorageAllocationSetStatus(in, out, s) +} + +func autoConvert_v1alpha3_NnfStorageList_To_v1alpha4_NnfStorageList(in *NnfStorageList, out *v1alpha4.NnfStorageList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha4.NnfStorage)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha3_NnfStorageList_To_v1alpha4_NnfStorageList is an autogenerated conversion function. +func Convert_v1alpha3_NnfStorageList_To_v1alpha4_NnfStorageList(in *NnfStorageList, out *v1alpha4.NnfStorageList, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfStorageList_To_v1alpha4_NnfStorageList(in, out, s) +} + +func autoConvert_v1alpha4_NnfStorageList_To_v1alpha3_NnfStorageList(in *v1alpha4.NnfStorageList, out *NnfStorageList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NnfStorage)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha4_NnfStorageList_To_v1alpha3_NnfStorageList is an autogenerated conversion function. +func Convert_v1alpha4_NnfStorageList_To_v1alpha3_NnfStorageList(in *v1alpha4.NnfStorageList, out *NnfStorageList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageList_To_v1alpha3_NnfStorageList(in, out, s) +} + +func autoConvert_v1alpha3_NnfStorageLustreSpec_To_v1alpha4_NnfStorageLustreSpec(in *NnfStorageLustreSpec, out *v1alpha4.NnfStorageLustreSpec, s conversion.Scope) error { + out.TargetType = in.TargetType + out.BackFs = in.BackFs + out.MgsAddress = in.MgsAddress + out.PersistentMgsReference = in.PersistentMgsReference + return nil +} + +// Convert_v1alpha3_NnfStorageLustreSpec_To_v1alpha4_NnfStorageLustreSpec is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfStorageLustreSpec_To_v1alpha4_NnfStorageLustreSpec(in *NnfStorageLustreSpec, out *v1alpha4.NnfStorageLustreSpec, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfStorageLustreSpec_To_v1alpha4_NnfStorageLustreSpec(in, out, s) +} + +func autoConvert_v1alpha4_NnfStorageLustreSpec_To_v1alpha3_NnfStorageLustreSpec(in *v1alpha4.NnfStorageLustreSpec, out *NnfStorageLustreSpec, s conversion.Scope) error { + out.TargetType = in.TargetType + out.BackFs = in.BackFs + out.MgsAddress = in.MgsAddress + out.PersistentMgsReference = in.PersistentMgsReference + return nil +} + +// Convert_v1alpha4_NnfStorageLustreSpec_To_v1alpha3_NnfStorageLustreSpec is an autogenerated conversion function. +func Convert_v1alpha4_NnfStorageLustreSpec_To_v1alpha3_NnfStorageLustreSpec(in *v1alpha4.NnfStorageLustreSpec, out *NnfStorageLustreSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageLustreSpec_To_v1alpha3_NnfStorageLustreSpec(in, out, s) +} + +func autoConvert_v1alpha3_NnfStorageLustreStatus_To_v1alpha4_NnfStorageLustreStatus(in *NnfStorageLustreStatus, out *v1alpha4.NnfStorageLustreStatus, s conversion.Scope) error { + out.MgsAddress = in.MgsAddress + out.FileSystemName = in.FileSystemName + out.LustreMgtReference = in.LustreMgtReference + return nil +} + +// Convert_v1alpha3_NnfStorageLustreStatus_To_v1alpha4_NnfStorageLustreStatus is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfStorageLustreStatus_To_v1alpha4_NnfStorageLustreStatus(in *NnfStorageLustreStatus, out *v1alpha4.NnfStorageLustreStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfStorageLustreStatus_To_v1alpha4_NnfStorageLustreStatus(in, out, s) +} + +func autoConvert_v1alpha4_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreStatus(in *v1alpha4.NnfStorageLustreStatus, out *NnfStorageLustreStatus, s conversion.Scope) error { + out.MgsAddress = in.MgsAddress + out.FileSystemName = in.FileSystemName + out.LustreMgtReference = in.LustreMgtReference + return nil +} + +// Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreStatus is an autogenerated conversion function. +func Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreStatus(in *v1alpha4.NnfStorageLustreStatus, out *NnfStorageLustreStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreStatus(in, out, s) +} + +func autoConvert_v1alpha3_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(in *NnfStorageProfile, out *v1alpha4.NnfStorageProfile, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha3_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData(&in.Data, &out.Data, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha3_NnfStorageProfile_To_v1alpha4_NnfStorageProfile is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(in *NnfStorageProfile, out *v1alpha4.NnfStorageProfile, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(in, out, s) +} + +func autoConvert_v1alpha4_NnfStorageProfile_To_v1alpha3_NnfStorageProfile(in *v1alpha4.NnfStorageProfile, out *NnfStorageProfile, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha4_NnfStorageProfileData_To_v1alpha3_NnfStorageProfileData(&in.Data, &out.Data, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha4_NnfStorageProfile_To_v1alpha3_NnfStorageProfile is an autogenerated conversion function. +func Convert_v1alpha4_NnfStorageProfile_To_v1alpha3_NnfStorageProfile(in *v1alpha4.NnfStorageProfile, out *NnfStorageProfile, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfile_To_v1alpha3_NnfStorageProfile(in, out, s) +} + +func autoConvert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(in *NnfStorageProfileCmdLines, out *v1alpha4.NnfStorageProfileCmdLines, s conversion.Scope) error { + out.Mkfs = in.Mkfs + out.SharedVg = in.SharedVg + out.PvCreate = in.PvCreate + out.PvRemove = in.PvRemove + out.VgCreate = in.VgCreate + if err := Convert_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines(&in.VgChange, &out.VgChange, s); err != nil { + return err + } + out.VgRemove = in.VgRemove + out.LvCreate = in.LvCreate + if err := Convert_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines(&in.LvChange, &out.LvChange, s); err != nil { + return err + } + out.LvRemove = in.LvRemove + out.MountRabbit = in.MountRabbit + out.PostActivate = *(*[]string)(unsafe.Pointer(&in.PostActivate)) + out.MountCompute = in.MountCompute + out.PreDeactivate = *(*[]string)(unsafe.Pointer(&in.PreDeactivate)) + return nil +} + +// 
Convert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines is an autogenerated conversion function. +func Convert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(in *NnfStorageProfileCmdLines, out *v1alpha4.NnfStorageProfileCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(in, out, s) +} + +func autoConvert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(in *v1alpha4.NnfStorageProfileCmdLines, out *NnfStorageProfileCmdLines, s conversion.Scope) error { + out.Mkfs = in.Mkfs + out.SharedVg = in.SharedVg + out.PvCreate = in.PvCreate + out.PvRemove = in.PvRemove + out.VgCreate = in.VgCreate + if err := Convert_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines(&in.VgChange, &out.VgChange, s); err != nil { + return err + } + out.VgRemove = in.VgRemove + out.LvCreate = in.LvCreate + if err := Convert_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines(&in.LvChange, &out.LvChange, s); err != nil { + return err + } + out.LvRemove = in.LvRemove + out.MountRabbit = in.MountRabbit + out.PostActivate = *(*[]string)(unsafe.Pointer(&in.PostActivate)) + out.MountCompute = in.MountCompute + out.PreDeactivate = *(*[]string)(unsafe.Pointer(&in.PreDeactivate)) + return nil +} + +// Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(in *v1alpha4.NnfStorageProfileCmdLines, out *NnfStorageProfileCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(in, out, s) +} + +func autoConvert_v1alpha3_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData(in *NnfStorageProfileData, out *v1alpha4.NnfStorageProfileData, s conversion.Scope) error { + out.Default = in.Default + out.Pinned = in.Pinned + if err := Convert_v1alpha3_NnfStorageProfileLustreData_To_v1alpha4_NnfStorageProfileLustreData(&in.LustreStorage, &out.LustreStorage, s); err != nil { + return err + } + if err := Convert_v1alpha3_NnfStorageProfileGFS2Data_To_v1alpha4_NnfStorageProfileGFS2Data(&in.GFS2Storage, &out.GFS2Storage, s); err != nil { + return err + } + if err := Convert_v1alpha3_NnfStorageProfileXFSData_To_v1alpha4_NnfStorageProfileXFSData(&in.XFSStorage, &out.XFSStorage, s); err != nil { + return err + } + if err := Convert_v1alpha3_NnfStorageProfileRawData_To_v1alpha4_NnfStorageProfileRawData(&in.RawStorage, &out.RawStorage, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha3_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData(in *NnfStorageProfileData, out *v1alpha4.NnfStorageProfileData, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData(in, out, s) +} + +func autoConvert_v1alpha4_NnfStorageProfileData_To_v1alpha3_NnfStorageProfileData(in *v1alpha4.NnfStorageProfileData, out *NnfStorageProfileData, s conversion.Scope) error { + out.Default = in.Default + out.Pinned = in.Pinned + if err := Convert_v1alpha4_NnfStorageProfileLustreData_To_v1alpha3_NnfStorageProfileLustreData(&in.LustreStorage, &out.LustreStorage, s); err != nil { + return err + } + if err := Convert_v1alpha4_NnfStorageProfileGFS2Data_To_v1alpha3_NnfStorageProfileGFS2Data(&in.GFS2Storage, &out.GFS2Storage, s); err != nil { + return err + } + if err := Convert_v1alpha4_NnfStorageProfileXFSData_To_v1alpha3_NnfStorageProfileXFSData(&in.XFSStorage, &out.XFSStorage, s); err != nil { + return err + } + if err := Convert_v1alpha4_NnfStorageProfileRawData_To_v1alpha3_NnfStorageProfileRawData(&in.RawStorage, &out.RawStorage, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha4_NnfStorageProfileData_To_v1alpha3_NnfStorageProfileData is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageProfileData_To_v1alpha3_NnfStorageProfileData(in *v1alpha4.NnfStorageProfileData, out *NnfStorageProfileData, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileData_To_v1alpha3_NnfStorageProfileData(in, out, s) +} + +func autoConvert_v1alpha3_NnfStorageProfileGFS2Data_To_v1alpha4_NnfStorageProfileGFS2Data(in *NnfStorageProfileGFS2Data, out *v1alpha4.NnfStorageProfileGFS2Data, s conversion.Scope) error { + if err := Convert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { + return err + } + out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) + out.CapacityScalingFactor = in.CapacityScalingFactor + return nil +} + +// Convert_v1alpha3_NnfStorageProfileGFS2Data_To_v1alpha4_NnfStorageProfileGFS2Data is an autogenerated conversion function. +func Convert_v1alpha3_NnfStorageProfileGFS2Data_To_v1alpha4_NnfStorageProfileGFS2Data(in *NnfStorageProfileGFS2Data, out *v1alpha4.NnfStorageProfileGFS2Data, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfStorageProfileGFS2Data_To_v1alpha4_NnfStorageProfileGFS2Data(in, out, s) +} + +func autoConvert_v1alpha4_NnfStorageProfileGFS2Data_To_v1alpha3_NnfStorageProfileGFS2Data(in *v1alpha4.NnfStorageProfileGFS2Data, out *NnfStorageProfileGFS2Data, s conversion.Scope) error { + if err := Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { + return err + } + out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) + out.CapacityScalingFactor = in.CapacityScalingFactor + return nil +} + +// Convert_v1alpha4_NnfStorageProfileGFS2Data_To_v1alpha3_NnfStorageProfileGFS2Data is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageProfileGFS2Data_To_v1alpha3_NnfStorageProfileGFS2Data(in *v1alpha4.NnfStorageProfileGFS2Data, out *NnfStorageProfileGFS2Data, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileGFS2Data_To_v1alpha3_NnfStorageProfileGFS2Data(in, out, s) +} + +func autoConvert_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines(in *NnfStorageProfileLVMLvChangeCmdLines, out *v1alpha4.NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { + out.Activate = in.Activate + out.Deactivate = in.Deactivate + return nil +} + +// Convert_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines is an autogenerated conversion function. +func Convert_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines(in *NnfStorageProfileLVMLvChangeCmdLines, out *v1alpha4.NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines(in, out, s) +} + +func autoConvert_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines(in *v1alpha4.NnfStorageProfileLVMLvChangeCmdLines, out *NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { + out.Activate = in.Activate + out.Deactivate = in.Deactivate + return nil +} + +// Convert_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines(in *v1alpha4.NnfStorageProfileLVMLvChangeCmdLines, out *NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMLvChangeCmdLines(in, out, s) +} + +func autoConvert_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines(in *NnfStorageProfileLVMVgChangeCmdLines, out *v1alpha4.NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { + out.LockStart = in.LockStart + out.LockStop = in.LockStop + return nil +} + +// Convert_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines is an autogenerated conversion function. +func Convert_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines(in *NnfStorageProfileLVMVgChangeCmdLines, out *v1alpha4.NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines(in, out, s) +} + +func autoConvert_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines(in *v1alpha4.NnfStorageProfileLVMVgChangeCmdLines, out *NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { + out.LockStart = in.LockStart + out.LockStop = in.LockStop + return nil +} + +// Convert_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines(in *v1alpha4.NnfStorageProfileLVMVgChangeCmdLines, out *NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha3_NnfStorageProfileLVMVgChangeCmdLines(in, out, s) +} + +func autoConvert_v1alpha3_NnfStorageProfileList_To_v1alpha4_NnfStorageProfileList(in *NnfStorageProfileList, out *v1alpha4.NnfStorageProfileList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha4.NnfStorageProfile)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha3_NnfStorageProfileList_To_v1alpha4_NnfStorageProfileList is an autogenerated conversion function. +func Convert_v1alpha3_NnfStorageProfileList_To_v1alpha4_NnfStorageProfileList(in *NnfStorageProfileList, out *v1alpha4.NnfStorageProfileList, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfStorageProfileList_To_v1alpha4_NnfStorageProfileList(in, out, s) +} + +func autoConvert_v1alpha4_NnfStorageProfileList_To_v1alpha3_NnfStorageProfileList(in *v1alpha4.NnfStorageProfileList, out *NnfStorageProfileList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NnfStorageProfile)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha4_NnfStorageProfileList_To_v1alpha3_NnfStorageProfileList is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageProfileList_To_v1alpha3_NnfStorageProfileList(in *v1alpha4.NnfStorageProfileList, out *NnfStorageProfileList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileList_To_v1alpha3_NnfStorageProfileList(in, out, s) +} + +func autoConvert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(in *NnfStorageProfileLustreCmdLines, out *v1alpha4.NnfStorageProfileLustreCmdLines, s conversion.Scope) error { + out.ZpoolCreate = in.ZpoolCreate + out.Mkfs = in.Mkfs + out.MountTarget = in.MountTarget + out.PostActivate = *(*[]string)(unsafe.Pointer(&in.PostActivate)) + out.PreDeactivate = *(*[]string)(unsafe.Pointer(&in.PreDeactivate)) + return nil +} + +// Convert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines is an autogenerated conversion function. +func Convert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(in *NnfStorageProfileLustreCmdLines, out *v1alpha4.NnfStorageProfileLustreCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(in, out, s) +} + +func autoConvert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(in *v1alpha4.NnfStorageProfileLustreCmdLines, out *NnfStorageProfileLustreCmdLines, s conversion.Scope) error { + out.ZpoolCreate = in.ZpoolCreate + out.Mkfs = in.Mkfs + out.MountTarget = in.MountTarget + out.PostActivate = *(*[]string)(unsafe.Pointer(&in.PostActivate)) + out.PreDeactivate = *(*[]string)(unsafe.Pointer(&in.PreDeactivate)) + return nil +} + +// Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(in *v1alpha4.NnfStorageProfileLustreCmdLines, out *NnfStorageProfileLustreCmdLines, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(in, out, s) +} + +func autoConvert_v1alpha3_NnfStorageProfileLustreData_To_v1alpha4_NnfStorageProfileLustreData(in *NnfStorageProfileLustreData, out *v1alpha4.NnfStorageProfileLustreData, s conversion.Scope) error { + out.CombinedMGTMDT = in.CombinedMGTMDT + out.ExternalMGS = in.ExternalMGS + out.CapacityMGT = in.CapacityMGT + out.CapacityMDT = in.CapacityMDT + out.ExclusiveMDT = in.ExclusiveMDT + out.CapacityScalingFactor = in.CapacityScalingFactor + out.StandaloneMGTPoolName = in.StandaloneMGTPoolName + if err := Convert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(&in.MgtCmdLines, &out.MgtCmdLines, s); err != nil { + return err + } + if err := Convert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(&in.MdtCmdLines, &out.MdtCmdLines, s); err != nil { + return err + } + if err := Convert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(&in.MgtMdtCmdLines, &out.MgtMdtCmdLines, s); err != nil { + return err + } + if err := Convert_v1alpha3_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(&in.OstCmdLines, &out.OstCmdLines, s); err != nil { + return err + } + if err := Convert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(&in.MgtOptions, &out.MgtOptions, s); err != nil { + return err + } + if err := Convert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(&in.MdtOptions, &out.MdtOptions, s); err != nil { + return err + } + if err := 
Convert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(&in.MgtMdtOptions, &out.MgtMdtOptions, s); err != nil { + return err + } + if err := Convert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(&in.OstOptions, &out.OstOptions, s); err != nil { + return err + } + out.MountRabbit = in.MountRabbit + out.MountCompute = in.MountCompute + return nil +} + +// Convert_v1alpha3_NnfStorageProfileLustreData_To_v1alpha4_NnfStorageProfileLustreData is an autogenerated conversion function. +func Convert_v1alpha3_NnfStorageProfileLustreData_To_v1alpha4_NnfStorageProfileLustreData(in *NnfStorageProfileLustreData, out *v1alpha4.NnfStorageProfileLustreData, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfStorageProfileLustreData_To_v1alpha4_NnfStorageProfileLustreData(in, out, s) +} + +func autoConvert_v1alpha4_NnfStorageProfileLustreData_To_v1alpha3_NnfStorageProfileLustreData(in *v1alpha4.NnfStorageProfileLustreData, out *NnfStorageProfileLustreData, s conversion.Scope) error { + out.CombinedMGTMDT = in.CombinedMGTMDT + out.ExternalMGS = in.ExternalMGS + out.CapacityMGT = in.CapacityMGT + out.CapacityMDT = in.CapacityMDT + out.ExclusiveMDT = in.ExclusiveMDT + out.CapacityScalingFactor = in.CapacityScalingFactor + out.StandaloneMGTPoolName = in.StandaloneMGTPoolName + if err := Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(&in.MgtCmdLines, &out.MgtCmdLines, s); err != nil { + return err + } + if err := Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(&in.MdtCmdLines, &out.MdtCmdLines, s); err != nil { + return err + } + if err := Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(&in.MgtMdtCmdLines, &out.MgtMdtCmdLines, s); err != nil { + return err + } + if err := 
Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(&in.OstCmdLines, &out.OstCmdLines, s); err != nil { + return err + } + if err := Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions(&in.MgtOptions, &out.MgtOptions, s); err != nil { + return err + } + if err := Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions(&in.MdtOptions, &out.MdtOptions, s); err != nil { + return err + } + if err := Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions(&in.MgtMdtOptions, &out.MgtMdtOptions, s); err != nil { + return err + } + if err := Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions(&in.OstOptions, &out.OstOptions, s); err != nil { + return err + } + out.MountRabbit = in.MountRabbit + out.MountCompute = in.MountCompute + return nil +} + +// Convert_v1alpha4_NnfStorageProfileLustreData_To_v1alpha3_NnfStorageProfileLustreData is an autogenerated conversion function. +func Convert_v1alpha4_NnfStorageProfileLustreData_To_v1alpha3_NnfStorageProfileLustreData(in *v1alpha4.NnfStorageProfileLustreData, out *NnfStorageProfileLustreData, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileLustreData_To_v1alpha3_NnfStorageProfileLustreData(in, out, s) +} + +func autoConvert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(in *NnfStorageProfileLustreMiscOptions, out *v1alpha4.NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { + out.ColocateComputes = in.ColocateComputes + out.Count = in.Count + out.Scale = in.Scale + out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) + return nil +} + +// Convert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(in *NnfStorageProfileLustreMiscOptions, out *v1alpha4.NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(in, out, s) +} + +func autoConvert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions(in *v1alpha4.NnfStorageProfileLustreMiscOptions, out *NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { + out.ColocateComputes = in.ColocateComputes + out.Count = in.Count + out.Scale = in.Scale + out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) + return nil +} + +// Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions is an autogenerated conversion function. +func Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions(in *v1alpha4.NnfStorageProfileLustreMiscOptions, out *NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha3_NnfStorageProfileLustreMiscOptions(in, out, s) +} + +func autoConvert_v1alpha3_NnfStorageProfileRawData_To_v1alpha4_NnfStorageProfileRawData(in *NnfStorageProfileRawData, out *v1alpha4.NnfStorageProfileRawData, s conversion.Scope) error { + if err := Convert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { + return err + } + out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) + out.CapacityScalingFactor = in.CapacityScalingFactor + return nil +} + +// Convert_v1alpha3_NnfStorageProfileRawData_To_v1alpha4_NnfStorageProfileRawData is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfStorageProfileRawData_To_v1alpha4_NnfStorageProfileRawData(in *NnfStorageProfileRawData, out *v1alpha4.NnfStorageProfileRawData, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfStorageProfileRawData_To_v1alpha4_NnfStorageProfileRawData(in, out, s) +} + +func autoConvert_v1alpha4_NnfStorageProfileRawData_To_v1alpha3_NnfStorageProfileRawData(in *v1alpha4.NnfStorageProfileRawData, out *NnfStorageProfileRawData, s conversion.Scope) error { + if err := Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { + return err + } + out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) + out.CapacityScalingFactor = in.CapacityScalingFactor + return nil +} + +// Convert_v1alpha4_NnfStorageProfileRawData_To_v1alpha3_NnfStorageProfileRawData is an autogenerated conversion function. +func Convert_v1alpha4_NnfStorageProfileRawData_To_v1alpha3_NnfStorageProfileRawData(in *v1alpha4.NnfStorageProfileRawData, out *NnfStorageProfileRawData, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileRawData_To_v1alpha3_NnfStorageProfileRawData(in, out, s) +} + +func autoConvert_v1alpha3_NnfStorageProfileXFSData_To_v1alpha4_NnfStorageProfileXFSData(in *NnfStorageProfileXFSData, out *v1alpha4.NnfStorageProfileXFSData, s conversion.Scope) error { + if err := Convert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { + return err + } + out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) + out.CapacityScalingFactor = in.CapacityScalingFactor + return nil +} + +// Convert_v1alpha3_NnfStorageProfileXFSData_To_v1alpha4_NnfStorageProfileXFSData is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfStorageProfileXFSData_To_v1alpha4_NnfStorageProfileXFSData(in *NnfStorageProfileXFSData, out *v1alpha4.NnfStorageProfileXFSData, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfStorageProfileXFSData_To_v1alpha4_NnfStorageProfileXFSData(in, out, s) +} + +func autoConvert_v1alpha4_NnfStorageProfileXFSData_To_v1alpha3_NnfStorageProfileXFSData(in *v1alpha4.NnfStorageProfileXFSData, out *NnfStorageProfileXFSData, s conversion.Scope) error { + if err := Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { + return err + } + out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) + out.CapacityScalingFactor = in.CapacityScalingFactor + return nil +} + +// Convert_v1alpha4_NnfStorageProfileXFSData_To_v1alpha3_NnfStorageProfileXFSData is an autogenerated conversion function. +func Convert_v1alpha4_NnfStorageProfileXFSData_To_v1alpha3_NnfStorageProfileXFSData(in *v1alpha4.NnfStorageProfileXFSData, out *NnfStorageProfileXFSData, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileXFSData_To_v1alpha3_NnfStorageProfileXFSData(in, out, s) +} + +func autoConvert_v1alpha3_NnfStorageSpec_To_v1alpha4_NnfStorageSpec(in *NnfStorageSpec, out *v1alpha4.NnfStorageSpec, s conversion.Scope) error { + out.FileSystemType = in.FileSystemType + out.UserID = in.UserID + out.GroupID = in.GroupID + out.AllocationSets = *(*[]v1alpha4.NnfStorageAllocationSetSpec)(unsafe.Pointer(&in.AllocationSets)) + return nil +} + +// Convert_v1alpha3_NnfStorageSpec_To_v1alpha4_NnfStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfStorageSpec_To_v1alpha4_NnfStorageSpec(in *NnfStorageSpec, out *v1alpha4.NnfStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfStorageSpec_To_v1alpha4_NnfStorageSpec(in, out, s) +} + +func autoConvert_v1alpha4_NnfStorageSpec_To_v1alpha3_NnfStorageSpec(in *v1alpha4.NnfStorageSpec, out *NnfStorageSpec, s conversion.Scope) error { + out.FileSystemType = in.FileSystemType + out.UserID = in.UserID + out.GroupID = in.GroupID + out.AllocationSets = *(*[]NnfStorageAllocationSetSpec)(unsafe.Pointer(&in.AllocationSets)) + return nil +} + +// Convert_v1alpha4_NnfStorageSpec_To_v1alpha3_NnfStorageSpec is an autogenerated conversion function. +func Convert_v1alpha4_NnfStorageSpec_To_v1alpha3_NnfStorageSpec(in *v1alpha4.NnfStorageSpec, out *NnfStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageSpec_To_v1alpha3_NnfStorageSpec(in, out, s) +} + +func autoConvert_v1alpha3_NnfStorageStatus_To_v1alpha4_NnfStorageStatus(in *NnfStorageStatus, out *v1alpha4.NnfStorageStatus, s conversion.Scope) error { + if err := Convert_v1alpha3_NnfStorageLustreStatus_To_v1alpha4_NnfStorageLustreStatus(&in.NnfStorageLustreStatus, &out.NnfStorageLustreStatus, s); err != nil { + return err + } + out.AllocationSets = *(*[]v1alpha4.NnfStorageAllocationSetStatus)(unsafe.Pointer(&in.AllocationSets)) + out.ResourceError = in.ResourceError + out.Ready = in.Ready + return nil +} + +// Convert_v1alpha3_NnfStorageStatus_To_v1alpha4_NnfStorageStatus is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfStorageStatus_To_v1alpha4_NnfStorageStatus(in *NnfStorageStatus, out *v1alpha4.NnfStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfStorageStatus_To_v1alpha4_NnfStorageStatus(in, out, s) +} + +func autoConvert_v1alpha4_NnfStorageStatus_To_v1alpha3_NnfStorageStatus(in *v1alpha4.NnfStorageStatus, out *NnfStorageStatus, s conversion.Scope) error { + if err := Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreStatus(&in.NnfStorageLustreStatus, &out.NnfStorageLustreStatus, s); err != nil { + return err + } + out.AllocationSets = *(*[]NnfStorageAllocationSetStatus)(unsafe.Pointer(&in.AllocationSets)) + out.ResourceError = in.ResourceError + out.Ready = in.Ready + return nil +} + +// Convert_v1alpha4_NnfStorageStatus_To_v1alpha3_NnfStorageStatus is an autogenerated conversion function. +func Convert_v1alpha4_NnfStorageStatus_To_v1alpha3_NnfStorageStatus(in *v1alpha4.NnfStorageStatus, out *NnfStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageStatus_To_v1alpha3_NnfStorageStatus(in, out, s) +} + +func autoConvert_v1alpha3_NnfSystemStorage_To_v1alpha4_NnfSystemStorage(in *NnfSystemStorage, out *v1alpha4.NnfSystemStorage, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha3_NnfSystemStorageSpec_To_v1alpha4_NnfSystemStorageSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha3_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha3_NnfSystemStorage_To_v1alpha4_NnfSystemStorage is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfSystemStorage_To_v1alpha4_NnfSystemStorage(in *NnfSystemStorage, out *v1alpha4.NnfSystemStorage, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfSystemStorage_To_v1alpha4_NnfSystemStorage(in, out, s) +} + +func autoConvert_v1alpha4_NnfSystemStorage_To_v1alpha3_NnfSystemStorage(in *v1alpha4.NnfSystemStorage, out *NnfSystemStorage, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha4_NnfSystemStorageStatus_To_v1alpha3_NnfSystemStorageStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha4_NnfSystemStorage_To_v1alpha3_NnfSystemStorage is an autogenerated conversion function. +func Convert_v1alpha4_NnfSystemStorage_To_v1alpha3_NnfSystemStorage(in *v1alpha4.NnfSystemStorage, out *NnfSystemStorage, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfSystemStorage_To_v1alpha3_NnfSystemStorage(in, out, s) +} + +func autoConvert_v1alpha3_NnfSystemStorageList_To_v1alpha4_NnfSystemStorageList(in *NnfSystemStorageList, out *v1alpha4.NnfSystemStorageList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha4.NnfSystemStorage)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha3_NnfSystemStorageList_To_v1alpha4_NnfSystemStorageList is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfSystemStorageList_To_v1alpha4_NnfSystemStorageList(in *NnfSystemStorageList, out *v1alpha4.NnfSystemStorageList, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfSystemStorageList_To_v1alpha4_NnfSystemStorageList(in, out, s) +} + +func autoConvert_v1alpha4_NnfSystemStorageList_To_v1alpha3_NnfSystemStorageList(in *v1alpha4.NnfSystemStorageList, out *NnfSystemStorageList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NnfSystemStorage)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha4_NnfSystemStorageList_To_v1alpha3_NnfSystemStorageList is an autogenerated conversion function. +func Convert_v1alpha4_NnfSystemStorageList_To_v1alpha3_NnfSystemStorageList(in *v1alpha4.NnfSystemStorageList, out *NnfSystemStorageList, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfSystemStorageList_To_v1alpha3_NnfSystemStorageList(in, out, s) +} + +func autoConvert_v1alpha3_NnfSystemStorageSpec_To_v1alpha4_NnfSystemStorageSpec(in *NnfSystemStorageSpec, out *v1alpha4.NnfSystemStorageSpec, s conversion.Scope) error { + out.SystemConfiguration = in.SystemConfiguration + out.ExcludeRabbits = *(*[]string)(unsafe.Pointer(&in.ExcludeRabbits)) + out.IncludeRabbits = *(*[]string)(unsafe.Pointer(&in.IncludeRabbits)) + out.ExcludeDisabledRabbits = in.ExcludeDisabledRabbits + out.ExcludeComputes = *(*[]string)(unsafe.Pointer(&in.ExcludeComputes)) + out.IncludeComputes = *(*[]string)(unsafe.Pointer(&in.IncludeComputes)) + out.ComputesTarget = v1alpha4.NnfSystemStorageComputesTarget(in.ComputesTarget) + out.ComputesPattern = *(*[]int)(unsafe.Pointer(&in.ComputesPattern)) + out.Capacity = in.Capacity + out.Type = in.Type + out.StorageProfile = in.StorageProfile + out.MakeClientMounts = in.MakeClientMounts + out.ClientMountPath = in.ClientMountPath + return nil +} + +// Convert_v1alpha3_NnfSystemStorageSpec_To_v1alpha4_NnfSystemStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfSystemStorageSpec_To_v1alpha4_NnfSystemStorageSpec(in *NnfSystemStorageSpec, out *v1alpha4.NnfSystemStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfSystemStorageSpec_To_v1alpha4_NnfSystemStorageSpec(in, out, s) +} + +func autoConvert_v1alpha4_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec(in *v1alpha4.NnfSystemStorageSpec, out *NnfSystemStorageSpec, s conversion.Scope) error { + out.SystemConfiguration = in.SystemConfiguration + out.ExcludeRabbits = *(*[]string)(unsafe.Pointer(&in.ExcludeRabbits)) + out.IncludeRabbits = *(*[]string)(unsafe.Pointer(&in.IncludeRabbits)) + out.ExcludeDisabledRabbits = in.ExcludeDisabledRabbits + out.ExcludeComputes = *(*[]string)(unsafe.Pointer(&in.ExcludeComputes)) + out.IncludeComputes = *(*[]string)(unsafe.Pointer(&in.IncludeComputes)) + out.ComputesTarget = NnfSystemStorageComputesTarget(in.ComputesTarget) + out.ComputesPattern = *(*[]int)(unsafe.Pointer(&in.ComputesPattern)) + out.Capacity = in.Capacity + out.Type = in.Type + out.StorageProfile = in.StorageProfile + out.MakeClientMounts = in.MakeClientMounts + out.ClientMountPath = in.ClientMountPath + return nil +} + +// Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec is an autogenerated conversion function. +func Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec(in *v1alpha4.NnfSystemStorageSpec, out *NnfSystemStorageSpec, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec(in, out, s) +} + +func autoConvert_v1alpha3_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus(in *NnfSystemStorageStatus, out *v1alpha4.NnfSystemStorageStatus, s conversion.Scope) error { + out.Ready = in.Ready + out.ResourceError = in.ResourceError + return nil +} + +// Convert_v1alpha3_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus is an autogenerated conversion function. 
+func Convert_v1alpha3_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus(in *NnfSystemStorageStatus, out *v1alpha4.NnfSystemStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus(in, out, s) +} + +func autoConvert_v1alpha4_NnfSystemStorageStatus_To_v1alpha3_NnfSystemStorageStatus(in *v1alpha4.NnfSystemStorageStatus, out *NnfSystemStorageStatus, s conversion.Scope) error { + out.Ready = in.Ready + out.ResourceError = in.ResourceError + return nil +} + +// Convert_v1alpha4_NnfSystemStorageStatus_To_v1alpha3_NnfSystemStorageStatus is an autogenerated conversion function. +func Convert_v1alpha4_NnfSystemStorageStatus_To_v1alpha3_NnfSystemStorageStatus(in *v1alpha4.NnfSystemStorageStatus, out *NnfSystemStorageStatus, s conversion.Scope) error { + return autoConvert_v1alpha4_NnfSystemStorageStatus_To_v1alpha3_NnfSystemStorageStatus(in, out, s) +} diff --git a/api/v1alpha3/zz_generated.deepcopy.go b/api/v1alpha3/zz_generated.deepcopy.go index 3011f264..e93ef123 100644 --- a/api/v1alpha3/zz_generated.deepcopy.go +++ b/api/v1alpha3/zz_generated.deepcopy.go @@ -26,7 +26,7 @@ package v1alpha3 import ( "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" + runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. diff --git a/api/v1alpha4/zz_generated.deepcopy.go b/api/v1alpha4/zz_generated.deepcopy.go index 3bc08e49..a9b084bb 100644 --- a/api/v1alpha4/zz_generated.deepcopy.go +++ b/api/v1alpha4/zz_generated.deepcopy.go @@ -24,16 +24,33 @@ package v1alpha4 import ( - runtime "k8s.io/apimachinery/pkg/runtime" + "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *LustreStorageSpec) DeepCopyInto(out *LustreStorageSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LustreStorageSpec. +func (in *LustreStorageSpec) DeepCopy() *LustreStorageSpec { + if in == nil { + return nil + } + out := new(LustreStorageSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfAccess) DeepCopyInto(out *NnfAccess) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) out.Spec = in.Spec - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfAccess. @@ -89,6 +106,8 @@ func (in *NnfAccessList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfAccessSpec) DeepCopyInto(out *NnfAccessSpec) { *out = *in + out.ClientReference = in.ClientReference + out.StorageReference = in.StorageReference } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfAccessSpec. @@ -104,6 +123,7 @@ func (in *NnfAccessSpec) DeepCopy() *NnfAccessSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfAccessStatus) DeepCopyInto(out *NnfAccessStatus) { *out = *in + in.ResourceError.DeepCopyInto(&out.ResourceError) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfAccessStatus. 
@@ -121,8 +141,7 @@ func (in *NnfContainerProfile) DeepCopyInto(out *NnfContainerProfile) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status + in.Data.DeepCopyInto(&out.Data) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfContainerProfile. @@ -143,6 +162,56 @@ func (in *NnfContainerProfile) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfContainerProfileData) DeepCopyInto(out *NnfContainerProfileData) { + *out = *in + if in.Storages != nil { + in, out := &in.Storages, &out.Storages + *out = make([]NnfContainerProfileStorage, len(*in)) + copy(*out, *in) + } + if in.PreRunTimeoutSeconds != nil { + in, out := &in.PreRunTimeoutSeconds, &out.PreRunTimeoutSeconds + *out = new(int64) + **out = **in + } + if in.PostRunTimeoutSeconds != nil { + in, out := &in.PostRunTimeoutSeconds, &out.PostRunTimeoutSeconds + *out = new(int64) + **out = **in + } + if in.UserID != nil { + in, out := &in.UserID, &out.UserID + *out = new(uint32) + **out = **in + } + if in.GroupID != nil { + in, out := &in.GroupID, &out.GroupID + *out = new(uint32) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(v1.PodSpec) + (*in).DeepCopyInto(*out) + } + if in.MPISpec != nil { + in, out := &in.MPISpec, &out.MPISpec + *out = new(v2beta1.MPIJobSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfContainerProfileData. +func (in *NnfContainerProfileData) DeepCopy() *NnfContainerProfileData { + if in == nil { + return nil + } + out := new(NnfContainerProfileData) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NnfContainerProfileList) DeepCopyInto(out *NnfContainerProfileList) { *out = *in @@ -176,31 +245,16 @@ func (in *NnfContainerProfileList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfContainerProfileSpec) DeepCopyInto(out *NnfContainerProfileSpec) { +func (in *NnfContainerProfileStorage) DeepCopyInto(out *NnfContainerProfileStorage) { *out = *in } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfContainerProfileSpec. -func (in *NnfContainerProfileSpec) DeepCopy() *NnfContainerProfileSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfContainerProfileStorage. +func (in *NnfContainerProfileStorage) DeepCopy() *NnfContainerProfileStorage { if in == nil { return nil } - out := new(NnfContainerProfileSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfContainerProfileStatus) DeepCopyInto(out *NnfContainerProfileStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfContainerProfileStatus. -func (in *NnfContainerProfileStatus) DeepCopy() *NnfContainerProfileStatus { - if in == nil { - return nil - } - out := new(NnfContainerProfileStatus) + out := new(NnfContainerProfileStorage) in.DeepCopyInto(out) return out } @@ -210,8 +264,8 @@ func (in *NnfDataMovement) DeepCopyInto(out *NnfDataMovement) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovement. 
@@ -232,6 +286,73 @@ func (in *NnfDataMovement) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfDataMovementCommandStatus) DeepCopyInto(out *NnfDataMovementCommandStatus) { + *out = *in + out.ElapsedTime = in.ElapsedTime + if in.ProgressPercentage != nil { + in, out := &in.ProgressPercentage, &out.ProgressPercentage + *out = new(int32) + **out = **in + } + in.LastMessageTime.DeepCopyInto(&out.LastMessageTime) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = new(int32) + **out = **in + } + if in.Directories != nil { + in, out := &in.Directories, &out.Directories + *out = new(int32) + **out = **in + } + if in.Files != nil { + in, out := &in.Files, &out.Files + *out = new(int32) + **out = **in + } + if in.Links != nil { + in, out := &in.Links, &out.Links + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementCommandStatus. +func (in *NnfDataMovementCommandStatus) DeepCopy() *NnfDataMovementCommandStatus { + if in == nil { + return nil + } + out := new(NnfDataMovementCommandStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfDataMovementConfig) DeepCopyInto(out *NnfDataMovementConfig) { + *out = *in + if in.Slots != nil { + in, out := &in.Slots, &out.Slots + *out = new(int) + **out = **in + } + if in.MaxSlots != nil { + in, out := &in.MaxSlots, &out.MaxSlots + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementConfig. 
+func (in *NnfDataMovementConfig) DeepCopy() *NnfDataMovementConfig { + if in == nil { + return nil + } + out := new(NnfDataMovementConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfDataMovementList) DeepCopyInto(out *NnfDataMovementList) { *out = *in @@ -269,7 +390,7 @@ func (in *NnfDataMovementManager) DeepCopyInto(out *NnfDataMovementManager) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) out.Status = in.Status } @@ -326,6 +447,9 @@ func (in *NnfDataMovementManagerList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfDataMovementManagerSpec) DeepCopyInto(out *NnfDataMovementManagerSpec) { *out = *in + in.Selector.DeepCopyInto(&out.Selector) + in.Template.DeepCopyInto(&out.Template) + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementManagerSpec. @@ -358,8 +482,7 @@ func (in *NnfDataMovementProfile) DeepCopyInto(out *NnfDataMovementProfile) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status + out.Data = in.Data } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementProfile. @@ -380,6 +503,21 @@ func (in *NnfDataMovementProfile) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfDataMovementProfileData) DeepCopyInto(out *NnfDataMovementProfileData) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementProfileData. 
+func (in *NnfDataMovementProfileData) DeepCopy() *NnfDataMovementProfileData { + if in == nil { + return nil + } + out := new(NnfDataMovementProfileData) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfDataMovementProfileList) DeepCopyInto(out *NnfDataMovementProfileList) { *out = *in @@ -413,61 +551,93 @@ func (in *NnfDataMovementProfileList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfDataMovementProfileSpec) DeepCopyInto(out *NnfDataMovementProfileSpec) { +func (in *NnfDataMovementSpec) DeepCopyInto(out *NnfDataMovementSpec) { *out = *in + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(NnfDataMovementSpecSourceDestination) + **out = **in + } + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(NnfDataMovementSpecSourceDestination) + **out = **in + } + out.ProfileReference = in.ProfileReference + if in.UserConfig != nil { + in, out := &in.UserConfig, &out.UserConfig + *out = new(NnfDataMovementConfig) + (*in).DeepCopyInto(*out) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementProfileSpec. -func (in *NnfDataMovementProfileSpec) DeepCopy() *NnfDataMovementProfileSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementSpec. +func (in *NnfDataMovementSpec) DeepCopy() *NnfDataMovementSpec { if in == nil { return nil } - out := new(NnfDataMovementProfileSpec) + out := new(NnfDataMovementSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NnfDataMovementProfileStatus) DeepCopyInto(out *NnfDataMovementProfileStatus) { +func (in *NnfDataMovementSpecSourceDestination) DeepCopyInto(out *NnfDataMovementSpecSourceDestination) { *out = *in + out.StorageReference = in.StorageReference } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementProfileStatus. -func (in *NnfDataMovementProfileStatus) DeepCopy() *NnfDataMovementProfileStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementSpecSourceDestination. +func (in *NnfDataMovementSpecSourceDestination) DeepCopy() *NnfDataMovementSpecSourceDestination { if in == nil { return nil } - out := new(NnfDataMovementProfileStatus) + out := new(NnfDataMovementSpecSourceDestination) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfDataMovementSpec) DeepCopyInto(out *NnfDataMovementSpec) { +func (in *NnfDataMovementStatus) DeepCopyInto(out *NnfDataMovementStatus) { *out = *in + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = (*in).DeepCopy() + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = (*in).DeepCopy() + } + if in.CommandStatus != nil { + in, out := &in.CommandStatus, &out.CommandStatus + *out = new(NnfDataMovementCommandStatus) + (*in).DeepCopyInto(*out) + } + in.ResourceError.DeepCopyInto(&out.ResourceError) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementSpec. -func (in *NnfDataMovementSpec) DeepCopy() *NnfDataMovementSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementStatus. 
+func (in *NnfDataMovementStatus) DeepCopy() *NnfDataMovementStatus { if in == nil { return nil } - out := new(NnfDataMovementSpec) + out := new(NnfDataMovementStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfDataMovementStatus) DeepCopyInto(out *NnfDataMovementStatus) { +func (in *NnfDriveStatus) DeepCopyInto(out *NnfDriveStatus) { *out = *in + out.NnfResourceStatus = in.NnfResourceStatus } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementStatus. -func (in *NnfDataMovementStatus) DeepCopy() *NnfDataMovementStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDriveStatus. +func (in *NnfDriveStatus) DeepCopy() *NnfDriveStatus { if in == nil { return nil } - out := new(NnfDataMovementStatus) + out := new(NnfDriveStatus) in.DeepCopyInto(out) return out } @@ -477,8 +647,8 @@ func (in *NnfLustreMGT) DeepCopyInto(out *NnfLustreMGT) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfLustreMGT. @@ -534,6 +704,22 @@ func (in *NnfLustreMGTList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NnfLustreMGTSpec) DeepCopyInto(out *NnfLustreMGTSpec) { *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.FsNameBlackList != nil { + in, out := &in.FsNameBlackList, &out.FsNameBlackList + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.FsNameStartReference = in.FsNameStartReference + if in.ClaimList != nil { + in, out := &in.ClaimList, &out.ClaimList + *out = make([]v1.ObjectReference, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfLustreMGTSpec. @@ -549,6 +735,12 @@ func (in *NnfLustreMGTSpec) DeepCopy() *NnfLustreMGTSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfLustreMGTStatus) DeepCopyInto(out *NnfLustreMGTStatus) { *out = *in + if in.ClaimList != nil { + in, out := &in.ClaimList, &out.ClaimList + *out = make([]NnfLustreMGTStatusClaim, len(*in)) + copy(*out, *in) + } + in.ResourceError.DeepCopyInto(&out.ResourceError) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfLustreMGTStatus. @@ -561,13 +753,29 @@ func (in *NnfLustreMGTStatus) DeepCopy() *NnfLustreMGTStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfLustreMGTStatusClaim) DeepCopyInto(out *NnfLustreMGTStatusClaim) { + *out = *in + out.Reference = in.Reference +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfLustreMGTStatusClaim. +func (in *NnfLustreMGTStatusClaim) DeepCopy() *NnfLustreMGTStatusClaim { + if in == nil { + return nil + } + out := new(NnfLustreMGTStatusClaim) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NnfNode) DeepCopyInto(out *NnfNode) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) out.Spec = in.Spec - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNode. @@ -593,8 +801,8 @@ func (in *NnfNodeBlockStorage) DeepCopyInto(out *NnfNodeBlockStorage) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorage. @@ -615,6 +823,88 @@ func (in *NnfNodeBlockStorage) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeBlockStorageAccessStatus) DeepCopyInto(out *NnfNodeBlockStorageAccessStatus) { + *out = *in + if in.DevicePaths != nil { + in, out := &in.DevicePaths, &out.DevicePaths + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageAccessStatus. +func (in *NnfNodeBlockStorageAccessStatus) DeepCopy() *NnfNodeBlockStorageAccessStatus { + if in == nil { + return nil + } + out := new(NnfNodeBlockStorageAccessStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeBlockStorageAllocationSpec) DeepCopyInto(out *NnfNodeBlockStorageAllocationSpec) { + *out = *in + if in.Access != nil { + in, out := &in.Access, &out.Access + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageAllocationSpec. 
+func (in *NnfNodeBlockStorageAllocationSpec) DeepCopy() *NnfNodeBlockStorageAllocationSpec { + if in == nil { + return nil + } + out := new(NnfNodeBlockStorageAllocationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeBlockStorageAllocationStatus) DeepCopyInto(out *NnfNodeBlockStorageAllocationStatus) { + *out = *in + if in.Accesses != nil { + in, out := &in.Accesses, &out.Accesses + *out = make(map[string]NnfNodeBlockStorageAccessStatus, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.Devices != nil { + in, out := &in.Devices, &out.Devices + *out = make([]NnfNodeBlockStorageDeviceStatus, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageAllocationStatus. +func (in *NnfNodeBlockStorageAllocationStatus) DeepCopy() *NnfNodeBlockStorageAllocationStatus { + if in == nil { + return nil + } + out := new(NnfNodeBlockStorageAllocationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeBlockStorageDeviceStatus) DeepCopyInto(out *NnfNodeBlockStorageDeviceStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageDeviceStatus. +func (in *NnfNodeBlockStorageDeviceStatus) DeepCopy() *NnfNodeBlockStorageDeviceStatus { + if in == nil { + return nil + } + out := new(NnfNodeBlockStorageDeviceStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NnfNodeBlockStorageList) DeepCopyInto(out *NnfNodeBlockStorageList) { *out = *in @@ -650,6 +940,13 @@ func (in *NnfNodeBlockStorageList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfNodeBlockStorageSpec) DeepCopyInto(out *NnfNodeBlockStorageSpec) { *out = *in + if in.Allocations != nil { + in, out := &in.Allocations, &out.Allocations + *out = make([]NnfNodeBlockStorageAllocationSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageSpec. @@ -665,6 +962,15 @@ func (in *NnfNodeBlockStorageSpec) DeepCopy() *NnfNodeBlockStorageSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfNodeBlockStorageStatus) DeepCopyInto(out *NnfNodeBlockStorageStatus) { *out = *in + if in.Allocations != nil { + in, out := &in.Allocations, &out.Allocations + *out = make([]NnfNodeBlockStorageAllocationStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.ResourceError.DeepCopyInto(&out.ResourceError) + in.PodStartTime.DeepCopyInto(&out.PodStartTime) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageStatus. @@ -683,7 +989,7 @@ func (in *NnfNodeECData) DeepCopyInto(out *NnfNodeECData) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) out.Spec = in.Spec - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeECData. @@ -754,6 +1060,24 @@ func (in *NnfNodeECDataSpec) DeepCopy() *NnfNodeECDataSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NnfNodeECDataStatus) DeepCopyInto(out *NnfNodeECDataStatus) { *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string]NnfNodeECPrivateData, len(*in)) + for key, val := range *in { + var outVal map[string]string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = make(NnfNodeECPrivateData, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + (*out)[key] = outVal + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeECDataStatus. @@ -766,6 +1090,27 @@ func (in *NnfNodeECDataStatus) DeepCopy() *NnfNodeECDataStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in NnfNodeECPrivateData) DeepCopyInto(out *NnfNodeECPrivateData) { + { + in := &in + *out = make(NnfNodeECPrivateData, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeECPrivateData. +func (in NnfNodeECPrivateData) DeepCopy() NnfNodeECPrivateData { + if in == nil { + return nil + } + out := new(NnfNodeECPrivateData) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfNodeList) DeepCopyInto(out *NnfNodeList) { *out = *in @@ -816,6 +1161,16 @@ func (in *NnfNodeSpec) DeepCopy() *NnfNodeSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NnfNodeStatus) DeepCopyInto(out *NnfNodeStatus) { *out = *in + if in.Servers != nil { + in, out := &in.Servers, &out.Servers + *out = make([]NnfServerStatus, len(*in)) + copy(*out, *in) + } + if in.Drives != nil { + in, out := &in.Drives, &out.Drives + *out = make([]NnfDriveStatus, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeStatus. @@ -834,7 +1189,7 @@ func (in *NnfNodeStorage) DeepCopyInto(out *NnfNodeStorage) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) out.Spec = in.Spec - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeStorage. @@ -855,6 +1210,21 @@ func (in *NnfNodeStorage) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeStorageAllocationStatus) DeepCopyInto(out *NnfNodeStorageAllocationStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeStorageAllocationStatus. +func (in *NnfNodeStorageAllocationStatus) DeepCopy() *NnfNodeStorageAllocationStatus { + if in == nil { + return nil + } + out := new(NnfNodeStorageAllocationStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfNodeStorageList) DeepCopyInto(out *NnfNodeStorageList) { *out = *in @@ -890,6 +1260,8 @@ func (in *NnfNodeStorageList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NnfNodeStorageSpec) DeepCopyInto(out *NnfNodeStorageSpec) { *out = *in + out.LustreStorage = in.LustreStorage + out.BlockReference = in.BlockReference } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeStorageSpec. @@ -905,6 +1277,12 @@ func (in *NnfNodeStorageSpec) DeepCopy() *NnfNodeStorageSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfNodeStorageStatus) DeepCopyInto(out *NnfNodeStorageStatus) { *out = *in + if in.Allocations != nil { + in, out := &in.Allocations, &out.Allocations + *out = make([]NnfNodeStorageAllocationStatus, len(*in)) + copy(*out, *in) + } + in.ResourceError.DeepCopyInto(&out.ResourceError) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeStorageStatus. @@ -922,8 +1300,8 @@ func (in *NnfPortManager) DeepCopyInto(out *NnfPortManager) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManager. @@ -944,6 +1322,51 @@ func (in *NnfPortManager) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfPortManagerAllocationSpec) DeepCopyInto(out *NnfPortManagerAllocationSpec) { + *out = *in + out.Requester = in.Requester +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManagerAllocationSpec. 
+func (in *NnfPortManagerAllocationSpec) DeepCopy() *NnfPortManagerAllocationSpec { + if in == nil { + return nil + } + out := new(NnfPortManagerAllocationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfPortManagerAllocationStatus) DeepCopyInto(out *NnfPortManagerAllocationStatus) { + *out = *in + if in.Requester != nil { + in, out := &in.Requester, &out.Requester + *out = new(v1.ObjectReference) + **out = **in + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]uint16, len(*in)) + copy(*out, *in) + } + if in.TimeUnallocated != nil { + in, out := &in.TimeUnallocated, &out.TimeUnallocated + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManagerAllocationStatus. +func (in *NnfPortManagerAllocationStatus) DeepCopy() *NnfPortManagerAllocationStatus { + if in == nil { + return nil + } + out := new(NnfPortManagerAllocationStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfPortManagerList) DeepCopyInto(out *NnfPortManagerList) { *out = *in @@ -979,6 +1402,12 @@ func (in *NnfPortManagerList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfPortManagerSpec) DeepCopyInto(out *NnfPortManagerSpec) { *out = *in + out.SystemConfiguration = in.SystemConfiguration + if in.Allocations != nil { + in, out := &in.Allocations, &out.Allocations + *out = make([]NnfPortManagerAllocationSpec, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManagerSpec. 
@@ -994,6 +1423,13 @@ func (in *NnfPortManagerSpec) DeepCopy() *NnfPortManagerSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfPortManagerStatus) DeepCopyInto(out *NnfPortManagerStatus) { *out = *in + if in.Allocations != nil { + in, out := &in.Allocations, &out.Allocations + *out = make([]NnfPortManagerAllocationStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManagerStatus. @@ -1006,13 +1442,44 @@ func (in *NnfPortManagerStatus) DeepCopy() *NnfPortManagerStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfResourceStatus) DeepCopyInto(out *NnfResourceStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfResourceStatus. +func (in *NnfResourceStatus) DeepCopy() *NnfResourceStatus { + if in == nil { + return nil + } + out := new(NnfResourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfServerStatus) DeepCopyInto(out *NnfServerStatus) { + *out = *in + out.NnfResourceStatus = in.NnfResourceStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfServerStatus. +func (in *NnfServerStatus) DeepCopy() *NnfServerStatus { + if in == nil { + return nil + } + out := new(NnfServerStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NnfStorage) DeepCopyInto(out *NnfStorage) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorage. @@ -1033,6 +1500,57 @@ func (in *NnfStorage) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageAllocationNodes) DeepCopyInto(out *NnfStorageAllocationNodes) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageAllocationNodes. +func (in *NnfStorageAllocationNodes) DeepCopy() *NnfStorageAllocationNodes { + if in == nil { + return nil + } + out := new(NnfStorageAllocationNodes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageAllocationSetSpec) DeepCopyInto(out *NnfStorageAllocationSetSpec) { + *out = *in + out.NnfStorageLustreSpec = in.NnfStorageLustreSpec + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]NnfStorageAllocationNodes, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageAllocationSetSpec. +func (in *NnfStorageAllocationSetSpec) DeepCopy() *NnfStorageAllocationSetSpec { + if in == nil { + return nil + } + out := new(NnfStorageAllocationSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NnfStorageAllocationSetStatus) DeepCopyInto(out *NnfStorageAllocationSetStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageAllocationSetStatus. +func (in *NnfStorageAllocationSetStatus) DeepCopy() *NnfStorageAllocationSetStatus { + if in == nil { + return nil + } + out := new(NnfStorageAllocationSetStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfStorageList) DeepCopyInto(out *NnfStorageList) { *out = *in @@ -1065,13 +1583,44 @@ func (in *NnfStorageList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageLustreSpec) DeepCopyInto(out *NnfStorageLustreSpec) { + *out = *in + out.PersistentMgsReference = in.PersistentMgsReference +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageLustreSpec. +func (in *NnfStorageLustreSpec) DeepCopy() *NnfStorageLustreSpec { + if in == nil { + return nil + } + out := new(NnfStorageLustreSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageLustreStatus) DeepCopyInto(out *NnfStorageLustreStatus) { + *out = *in + out.LustreMgtReference = in.LustreMgtReference +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageLustreStatus. +func (in *NnfStorageLustreStatus) DeepCopy() *NnfStorageLustreStatus { + if in == nil { + return nil + } + out := new(NnfStorageLustreStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NnfStorageProfile) DeepCopyInto(out *NnfStorageProfile) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status + in.Data.DeepCopyInto(&out.Data) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfile. @@ -1092,6 +1641,103 @@ func (in *NnfStorageProfile) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfileCmdLines) DeepCopyInto(out *NnfStorageProfileCmdLines) { + *out = *in + out.VgChange = in.VgChange + out.LvChange = in.LvChange + if in.PostActivate != nil { + in, out := &in.PostActivate, &out.PostActivate + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreDeactivate != nil { + in, out := &in.PreDeactivate, &out.PreDeactivate + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileCmdLines. +func (in *NnfStorageProfileCmdLines) DeepCopy() *NnfStorageProfileCmdLines { + if in == nil { + return nil + } + out := new(NnfStorageProfileCmdLines) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfileData) DeepCopyInto(out *NnfStorageProfileData) { + *out = *in + in.LustreStorage.DeepCopyInto(&out.LustreStorage) + in.GFS2Storage.DeepCopyInto(&out.GFS2Storage) + in.XFSStorage.DeepCopyInto(&out.XFSStorage) + in.RawStorage.DeepCopyInto(&out.RawStorage) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileData. 
+func (in *NnfStorageProfileData) DeepCopy() *NnfStorageProfileData { + if in == nil { + return nil + } + out := new(NnfStorageProfileData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfileGFS2Data) DeepCopyInto(out *NnfStorageProfileGFS2Data) { + *out = *in + in.CmdLines.DeepCopyInto(&out.CmdLines) + if in.StorageLabels != nil { + in, out := &in.StorageLabels, &out.StorageLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileGFS2Data. +func (in *NnfStorageProfileGFS2Data) DeepCopy() *NnfStorageProfileGFS2Data { + if in == nil { + return nil + } + out := new(NnfStorageProfileGFS2Data) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfileLVMLvChangeCmdLines) DeepCopyInto(out *NnfStorageProfileLVMLvChangeCmdLines) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileLVMLvChangeCmdLines. +func (in *NnfStorageProfileLVMLvChangeCmdLines) DeepCopy() *NnfStorageProfileLVMLvChangeCmdLines { + if in == nil { + return nil + } + out := new(NnfStorageProfileLVMLvChangeCmdLines) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfileLVMVgChangeCmdLines) DeepCopyInto(out *NnfStorageProfileLVMVgChangeCmdLines) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileLVMVgChangeCmdLines. 
+func (in *NnfStorageProfileLVMVgChangeCmdLines) DeepCopy() *NnfStorageProfileLVMVgChangeCmdLines { + if in == nil { + return nil + } + out := new(NnfStorageProfileLVMVgChangeCmdLines) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfStorageProfileList) DeepCopyInto(out *NnfStorageProfileList) { *out = *in @@ -1125,31 +1771,111 @@ func (in *NnfStorageProfileList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfStorageProfileSpec) DeepCopyInto(out *NnfStorageProfileSpec) { +func (in *NnfStorageProfileLustreCmdLines) DeepCopyInto(out *NnfStorageProfileLustreCmdLines) { + *out = *in + if in.PostActivate != nil { + in, out := &in.PostActivate, &out.PostActivate + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreDeactivate != nil { + in, out := &in.PreDeactivate, &out.PreDeactivate + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileLustreCmdLines. +func (in *NnfStorageProfileLustreCmdLines) DeepCopy() *NnfStorageProfileLustreCmdLines { + if in == nil { + return nil + } + out := new(NnfStorageProfileLustreCmdLines) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NnfStorageProfileLustreData) DeepCopyInto(out *NnfStorageProfileLustreData) { + *out = *in + in.MgtCmdLines.DeepCopyInto(&out.MgtCmdLines) + in.MdtCmdLines.DeepCopyInto(&out.MdtCmdLines) + in.MgtMdtCmdLines.DeepCopyInto(&out.MgtMdtCmdLines) + in.OstCmdLines.DeepCopyInto(&out.OstCmdLines) + in.MgtOptions.DeepCopyInto(&out.MgtOptions) + in.MdtOptions.DeepCopyInto(&out.MdtOptions) + in.MgtMdtOptions.DeepCopyInto(&out.MgtMdtOptions) + in.OstOptions.DeepCopyInto(&out.OstOptions) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileLustreData. +func (in *NnfStorageProfileLustreData) DeepCopy() *NnfStorageProfileLustreData { + if in == nil { + return nil + } + out := new(NnfStorageProfileLustreData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfileLustreMiscOptions) DeepCopyInto(out *NnfStorageProfileLustreMiscOptions) { *out = *in + if in.StorageLabels != nil { + in, out := &in.StorageLabels, &out.StorageLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileSpec. -func (in *NnfStorageProfileSpec) DeepCopy() *NnfStorageProfileSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileLustreMiscOptions. +func (in *NnfStorageProfileLustreMiscOptions) DeepCopy() *NnfStorageProfileLustreMiscOptions { if in == nil { return nil } - out := new(NnfStorageProfileSpec) + out := new(NnfStorageProfileLustreMiscOptions) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NnfStorageProfileStatus) DeepCopyInto(out *NnfStorageProfileStatus) { +func (in *NnfStorageProfileRawData) DeepCopyInto(out *NnfStorageProfileRawData) { *out = *in + in.CmdLines.DeepCopyInto(&out.CmdLines) + if in.StorageLabels != nil { + in, out := &in.StorageLabels, &out.StorageLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileStatus. -func (in *NnfStorageProfileStatus) DeepCopy() *NnfStorageProfileStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileRawData. +func (in *NnfStorageProfileRawData) DeepCopy() *NnfStorageProfileRawData { if in == nil { return nil } - out := new(NnfStorageProfileStatus) + out := new(NnfStorageProfileRawData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageProfileXFSData) DeepCopyInto(out *NnfStorageProfileXFSData) { + *out = *in + in.CmdLines.DeepCopyInto(&out.CmdLines) + if in.StorageLabels != nil { + in, out := &in.StorageLabels, &out.StorageLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileXFSData. +func (in *NnfStorageProfileXFSData) DeepCopy() *NnfStorageProfileXFSData { + if in == nil { + return nil + } + out := new(NnfStorageProfileXFSData) in.DeepCopyInto(out) return out } @@ -1157,6 +1883,13 @@ func (in *NnfStorageProfileStatus) DeepCopy() *NnfStorageProfileStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NnfStorageSpec) DeepCopyInto(out *NnfStorageSpec) { *out = *in + if in.AllocationSets != nil { + in, out := &in.AllocationSets, &out.AllocationSets + *out = make([]NnfStorageAllocationSetSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageSpec. @@ -1172,6 +1905,13 @@ func (in *NnfStorageSpec) DeepCopy() *NnfStorageSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfStorageStatus) DeepCopyInto(out *NnfStorageStatus) { *out = *in + out.NnfStorageLustreStatus = in.NnfStorageLustreStatus + if in.AllocationSets != nil { + in, out := &in.AllocationSets, &out.AllocationSets + *out = make([]NnfStorageAllocationSetStatus, len(*in)) + copy(*out, *in) + } + in.ResourceError.DeepCopyInto(&out.ResourceError) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageStatus. @@ -1189,8 +1929,8 @@ func (in *NnfSystemStorage) DeepCopyInto(out *NnfSystemStorage) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfSystemStorage. @@ -1246,6 +1986,33 @@ func (in *NnfSystemStorageList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NnfSystemStorageSpec) DeepCopyInto(out *NnfSystemStorageSpec) { *out = *in + out.SystemConfiguration = in.SystemConfiguration + if in.ExcludeRabbits != nil { + in, out := &in.ExcludeRabbits, &out.ExcludeRabbits + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IncludeRabbits != nil { + in, out := &in.IncludeRabbits, &out.IncludeRabbits + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludeComputes != nil { + in, out := &in.ExcludeComputes, &out.ExcludeComputes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IncludeComputes != nil { + in, out := &in.IncludeComputes, &out.IncludeComputes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ComputesPattern != nil { + in, out := &in.ComputesPattern, &out.ComputesPattern + *out = make([]int, len(*in)) + copy(*out, *in) + } + out.StorageProfile = in.StorageProfile } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfSystemStorageSpec. @@ -1261,6 +2028,7 @@ func (in *NnfSystemStorageSpec) DeepCopy() *NnfSystemStorageSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfSystemStorageStatus) DeepCopyInto(out *NnfSystemStorageStatus) { *out = *in + in.ResourceError.DeepCopyInto(&out.ResourceError) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfSystemStorageStatus. 
diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfaccesses.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfaccesses.yaml index fb038fb0..e070bfdf 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfaccesses.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfaccesses.yaml @@ -761,6 +761,256 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The desired state + jsonPath: .spec.desiredState + name: DESIREDSTATE + type: string + - description: The current state + jsonPath: .status.state + name: STATE + type: string + - description: Whether the state has been achieved + jsonPath: .status.ready + name: READY + type: boolean + - jsonPath: .status.error.severity + name: ERROR + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha4 + schema: + openAPIV3Schema: + description: NnfAccess is the Schema for the nnfaccesses API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NnfAccessSpec defines the desired state of NnfAccess + properties: + clientReference: + description: |- + ClientReference is for a client resource. (DWS) Computes is the only client + resource type currently supported + properties: + apiVersion: + description: API version of the referent. 
+ type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + desiredState: + description: DesiredState is the desired state for the mounts on the + client + enum: + - mounted + - unmounted + type: string + groupID: + description: GroupID for the new mount. 
Currently only used for raw + format: int32 + type: integer + makeClientMounts: + default: true + description: |- + MakeClientMounts determines whether the ClientMount resources are made, or if only + the access list on the NnfNodeBlockStorage is updated + type: boolean + mountPath: + description: MountPath for the storage target on the client + type: string + mountPathPrefix: + type: string + storageReference: + description: StorageReference is the NnfStorage reference + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + target: + description: |- + Target specifies which storage targets the client should mount + - single: Only one of the storage the client can access + - all: All of the storage the client can access + - shared: Multiple clients access the same storage + enum: + - single + - all + - shared + type: string + teardownState: + allOf: + - enum: + - Proposal + - Setup + - DataIn + - PreRun + - PostRun + - DataOut + - Teardown + - enum: + - PreRun + - PostRun + - Teardown + description: |- + TeardownState is the desired state of the workflow for this NNF Access resource to + be torn down and deleted. + type: string + userID: + description: UserID for the new mount. Currently only used for raw + format: int32 + type: integer + required: + - desiredState + - groupID + - makeClientMounts + - storageReference + - target + - teardownState + - userID + type: object + status: + description: NnfAccessStatus defines the observed state of NnfAccess + properties: + error: + description: Error information + properties: + debugMessage: + description: Internal debug message for the error + type: string + severity: + description: |- + Indication of how severe the error is. Minor will likely succeed, Major may + succeed, and Fatal will never succeed. 
+ enum: + - Minor + - Major + - Fatal + type: string + type: + description: Internal or user error + enum: + - Internal + - User + - WLM + type: string + userMessage: + description: Optional user facing message if the error is relevant + to an end user + type: string + required: + - debugMessage + - severity + - type + type: object + ready: + description: Ready signifies whether status.state has been achieved + type: boolean + state: + description: State is the current state + enum: + - mounted + - unmounted + type: string + required: + - ready + - state + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfcontainerprofiles.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfcontainerprofiles.yaml index 44fffade..41a62995 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfcontainerprofiles.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfcontainerprofiles.yaml @@ -44548,4 +44548,14849 @@ spec: - data type: object served: true + storage: false + - name: v1alpha4 + schema: + openAPIV3Schema: + description: NnfContainerProfile is the Schema for the nnfcontainerprofiles + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + data: + description: NnfContainerProfileSpec defines the desired state of NnfContainerProfile + properties: + groupID: + description: |- + GroupID specifies the group ID that is allowed to use this profile. If this is specified, + only Workflows that have a matching group ID can select this profile. + format: int32 + type: integer + mpiSpec: + description: |- + MPIJobSpec to define the MPI containers created from this profile. 
This functionality is + provided via mpi-operator, a 3rd party tool to assist in running MPI applications across + worker containers. + Either this or Spec must be provided, but not both. + + + All the fields defined drive mpi-operator behavior. See the type definition of MPISpec for + more detail: + https://github.com/kubeflow/mpi-operator/blob/v0.4.0/pkg/apis/kubeflow/v2beta1/types.go#L137 + + + Note: most of these fields are fully customizable with a few exceptions. These fields are + overridden by NNF software to ensure proper behavior to interface with the DWS workflow + - Replicas + - RunPolicy.BackoffLimit (this is set above by `RetryLimit`) + - Worker/Launcher.RestartPolicy + properties: + mpiImplementation: + default: OpenMPI + description: |- + MPIImplementation is the MPI implementation. + Options are "OpenMPI" (default) and "Intel". + enum: + - OpenMPI + - Intel + type: string + mpiReplicaSpecs: + additionalProperties: + description: ReplicaSpec is a description of the replica + properties: + replicas: + description: |- + Replicas is the desired number of replicas of the given template. + If unspecified, defaults to 1. + format: int32 + type: integer + restartPolicy: + description: |- + Restart policy for all replicas within the job. + One of Always, OnFailure, Never and ExitCode. + Default to Never. + type: string + template: + description: |- + Template is the object that describes the pod that + will be created for this replica. RestartPolicy in PodTemplateSpec + will be overide by RestartPolicy in ReplicaSpec + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + type: object + spec: + description: |- + Specification of the desired behavior of the pod. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling + constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling + rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, + associated with the corresponding + weight. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key + that the selector applies + to. 
+ type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key + that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with + matching the corresponding nodeSelectorTerm, + in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node + selector terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key + that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. 
+ items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key + that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling + rules (e.g. co-locate this pod in the same + node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. 
+ items: + description: The weights of all of the + matched WeightedPodAffinityTerm fields + are added per-node to find the most + preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity + term, associated with the corresponding + weight. + properties: + labelSelector: + description: A label query over + a set of resources, in this + case pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. 
+ null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a + set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling + rules (e.g. avoid putting this pod in the + same node, zone, etc. as some other pod(s)). 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the + matched WeightedPodAffinityTerm fields + are added per-node to find the most + preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity + term, associated with the corresponding + weight. + properties: + labelSelector: + description: A label query over + a set of resources, in this + case pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a + set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. 
+ The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates + whether a service account token should be automatically + mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that + you want to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment + variable present in a Container. + properties: + name: + description: Name of the environment + variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment + variable's value. Cannot be used if + value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a + ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether + the ConfigMap or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a + secret in the pod's namespace + properties: + key: + description: The key of the + secret to select from. Must + be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether + the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the + source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select + from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the + ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier + to prepend to each key in the ConfigMap. + Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the + Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. 
+ More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the + http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to + set in the request. HTTP allows + repeated headers. 
+ items: + description: HTTPHeader describes + a custom header to be used + in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header + field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on + the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the + pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. 
Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the + http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to + set in the request. HTTP allows + repeated headers. + items: + description: HTTPHeader describes + a custom header to be used + in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header + field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on + the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the + pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action + involving a GRPC port. 
+ properties: + port: + description: Port number of the gRPC + service. Number must be in the range + 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http + request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set + in the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in + HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the + HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. 
+ Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action + involving a TCP port. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the pod + IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a + network port in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the + external port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. 
+ Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action + involving a GRPC port. + properties: + port: + description: Port number of the gRPC + service. Number must be in the range + 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http + request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set + in the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in + HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the + HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action + involving a TCP port. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the pod + IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the + container. + items: + description: ContainerResizePolicy represents + resource resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. 
+ + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references + one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. 
+ Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent + POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent + POSIX capabilities type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level + label that applies to the container. + type: string + role: + description: Role is a SELinux role + label that applies to the container. + type: string + type: + description: Type is a SELinux type + label that applies to the container. + type: string + user: + description: User is a SELinux user + label that applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. 
+ Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName + is the name of the GMSA credential + spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. 
+ If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action + involving a GRPC port. + properties: + port: + description: Port number of the gRPC + service. Number must be in the range + 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http + request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set + in the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in + HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the + HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action + involving a TCP port. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the pod + IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. 
+ Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. 
If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of + block devices to be used by the container. + items: + description: volumeDevice describes a mapping + of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path + inside of the container that the device + will be mapped to. + type: string + name: + description: name must match the name + of a persistentVolumeClaim in the + pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. 
+ items: + description: VolumeMount describes a mounting + of a Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name + of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. 
+ items: + type: string + type: array + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS + resolver options of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. 
Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. 
+ Cannot be updated. + items: + description: EnvVar represents an environment + variable present in a Container. + properties: + name: + description: Name of the environment + variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment + variable's value. Cannot be used if + value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a + ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether + the ConfigMap or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a + secret in the pod's namespace + properties: + key: + description: The key of the + secret to select from. Must + be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether + the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. 
+ Cannot be updated. + items: + description: EnvFromSource represents the + source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select + from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the + ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier + to prepend to each key in the ConfigMap. + Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the + Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for + ephemeral containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. 
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the + http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to + set in the request. HTTP allows + repeated headers. + items: + description: HTTPHeader describes + a custom header to be used + in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header + field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on + the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. 
TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the + pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the + http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to + set in the request. HTTP allows + repeated headers. + items: + description: HTTPHeader describes + a custom header to be used + in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header + field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on + the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the + pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral + containers. 
+ properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action + involving a GRPC port. + properties: + port: + description: Port number of the gRPC + service. Number must be in the range + 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http + request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set + in the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in + HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the + HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action + involving a TCP port. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the pod + IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral + containers. + items: + description: ContainerPort represents a + network port in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the + external port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. 
Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral + containers. + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action + involving a GRPC port. + properties: + port: + description: Port number of the gRPC + service. Number must be in the range + 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http + request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. 
You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set + in the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in + HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the + HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action + involving a TCP port. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the pod + IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the + container. + items: + description: ContainerResizePolicy represents + resource resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. 
+ type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references + one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent + POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent + POSIX capabilities type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. 
+ The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level + label that applies to the container. + type: string + role: + description: Role is a SELinux role + label that applies to the container. + type: string + type: + description: Type is a SELinux type + label that applies to the container. + type: string + user: + description: User is a SELinux user + label that applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. 
+ properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName + is the name of the GMSA credential + spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral + containers. + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. 
+ format: int32 + type: integer + grpc: + description: GRPC specifies an action + involving a GRPC port. + properties: + port: + description: Port number of the gRPC + service. Number must be in the range + 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http + request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set + in the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in + HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the + HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action + involving a TCP port. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the pod + IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. 
Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + targetContainerName: + description: |- + If set, the name of the container from PodSpec that this ephemeral container targets. + The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + If not set then the ephemeral container uses the namespaces configured in the Pod spec. + + + The container runtime must implement support for this feature. If the runtime does not + support namespace targeting then the result of setting this field is undefined. + type: string + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. 
+ type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of + block devices to be used by the container. + items: + description: volumeDevice describes a mapping + of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path + inside of the container that the device + will be mapped to. + type: string + name: + description: name must match the name + of a persistentVolumeClaim in the + pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. + Cannot be updated. + items: + description: VolumeMount describes a mounting + of a Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name + of a Volume. 
+ type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. This is only valid for non-hostNetwork pods. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. + type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. 
+ type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. Setting false is useful for + mitigating container breakout vulnerabilities even allowing users to run their + containers as root without actually having root privileges on the host. + This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. + type: boolean + hostname: + description: |- + Specifies the hostname of the Pod + If not specified, the pod's hostname will be set to a system-defined value. + type: string + imagePullSecrets: + description: |- + ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + If specified, these secrets will be passed to individual puller implementations for them to use. + More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + type: array + initContainers: + description: |- + List of initialization containers belonging to the pod. + Init containers are executed in order prior to containers being started. If any + init container fails, the pod is considered to have failed and is handled according + to its restartPolicy. 
The name for an init container or normal container must be + unique among all containers. + Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. + The resourceRequirements of an init container are taken into account during scheduling + by finding the highest request/limit for each resource type, and then using the max of + of that value or the sum of the normal containers. Limits are applied to init containers + in a similar fashion. + Init containers cannot currently be added or removed. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + items: + description: A single application container that + you want to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment + variable present in a Container. + properties: + name: + description: Name of the environment + variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment + variable's value. Cannot be used if + value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a + ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether + the ConfigMap or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a + secret in the pod's namespace + properties: + key: + description: The key of the + secret to select from. Must + be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether + the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the + source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select + from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the + ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier + to prepend to each key in the ConfigMap. + Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the + Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. 
+ More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the + http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to + set in the request. HTTP allows + repeated headers. 
+ items: + description: HTTPHeader describes + a custom header to be used + in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header + field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on + the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the + pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. 
Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the + http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to + set in the request. HTTP allows + repeated headers. + items: + description: HTTPHeader describes + a custom header to be used + in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header + field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on + the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the + pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action + involving a GRPC port. 
+ properties: + port: + description: Port number of the gRPC + service. Number must be in the range + 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http + request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set + in the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in + HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the + HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. 
+ Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action + involving a TCP port. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the pod + IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a + network port in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the + external port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. 
+ Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action + involving a GRPC port. + properties: + port: + description: Port number of the gRPC + service. Number must be in the range + 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http + request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set + in the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in + HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the + HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action + involving a TCP port. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the pod + IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the + container. + items: + description: ContainerResizePolicy represents + resource resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. 
+ + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references + one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. 
+ Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent + POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent + POSIX capabilities type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level + label that applies to the container. + type: string + role: + description: Role is a SELinux role + label that applies to the container. + type: string + type: + description: Type is a SELinux type + label that applies to the container. + type: string + user: + description: User is a SELinux user + label that applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. 
+ Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName + is the name of the GMSA credential + spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. 
+ If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action + to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action + involving a GRPC port. + properties: + port: + description: Port number of the gRPC + service. Number must be in the range + 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http + request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set + in the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in + HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the + HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action + involving a TCP port. + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the pod + IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. 
+ Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. 
If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of + block devices to be used by the container. + items: + description: volumeDevice describes a mapping + of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path + inside of the container that the device + will be mapped to. + type: string + name: + description: name must match the name + of a persistentVolumeClaim in the + pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. 
+ items: + description: VolumeMount describes a mounting + of a Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name + of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + nodeName: + description: |- + NodeName is a request to schedule this pod onto a specific node. If it is non-empty, + the scheduler simply schedules this pod onto that node, assuming that it fits resource + requirements. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. 
+ Selector which must match a node's labels for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + os: + description: |- + Specifies the OS of the containers in the pod. + Some pod and container fields are restricted if this is set. + + + If the OS field is set to linux, the following fields must be unset: + -securityContext.windowsOptions + + + If the OS field is set to windows, following fields must be unset: + - spec.hostPID + - spec.hostIPC + - spec.hostUsers + - spec.securityContext.seLinuxOptions + - spec.securityContext.seccompProfile + - spec.securityContext.fsGroup + - spec.securityContext.fsGroupChangePolicy + - spec.securityContext.sysctls + - spec.shareProcessNamespace + - spec.securityContext.runAsUser + - spec.securityContext.runAsGroup + - spec.securityContext.supplementalGroups + - spec.containers[*].securityContext.seLinuxOptions + - spec.containers[*].securityContext.seccompProfile + - spec.containers[*].securityContext.capabilities + - spec.containers[*].securityContext.readOnlyRootFilesystem + - spec.containers[*].securityContext.privileged + - spec.containers[*].securityContext.allowPrivilegeEscalation + - spec.containers[*].securityContext.procMount + - spec.containers[*].securityContext.runAsUser + - spec.containers[*].securityContext.runAsGroup + properties: + name: + description: |- + Name is the name of the operating system. The currently supported values are linux and windows. 
+ Additional value may be defined in future and can be one of: + https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + Clients should expect to handle additional values and treat unrecognized values in this field as os: null + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + This field will be autopopulated at admission time by the RuntimeClass admission controller. If + the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + The RuntimeClass admission controller will reject Pod create requests which have the overhead already + set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + type: object + preemptionPolicy: + description: |- + PreemptionPolicy is the Policy for preempting pods with lower priority. + One of Never, PreemptLowerPriority. + Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: |- + The priority value. Various system components use this field to find the + priority of the pod. When Priority Admission Controller is enabled, it + prevents users from setting this field. The admission controller populates + this field from PriorityClassName. + The higher the value, the higher the priority. + format: int32 + type: integer + priorityClassName: + description: |- + If specified, indicates the pod's priority. 
"system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name. + If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference + to a pod condition + properties: + conditionType: + description: ConditionType refers to a condition + in the pod's condition list with matching + type. + type: string + required: + - conditionType + type: object + type: array + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim through a ClaimSource. + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + source: + description: Source describes where to find + the ResourceClaim. + properties: + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. 
+ type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
+ If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + + + This is a beta feature enabled by the PodSchedulingReadiness feature gate. + items: + description: PodSchedulingGate is associated to + a Pod to guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. + Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. + properties: + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. 
+ type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label + that applies to the container. + type: string + role: + description: Role is a SELinux role label + that applies to the container. 
+ type: string + type: + description: Type is a SELinux type label + that applies to the container. + type: string + user: + description: User is a SELinux user label + that applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. 
+ items: + description: Sysctl defines a kernel parameter + to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the + name of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. 
+ type: string + serviceAccountName: + description: |- + ServiceAccountName is the name of the ServiceAccount to use to run this pod. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + type: string + setHostnameAsFQDN: + description: |- + If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). + In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). + In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. + If a pod does not have FQDN, this has no effect. + Default to false. + type: boolean + shareProcessNamespace: + description: |- + Share a single process namespace between all of the containers in a pod. + When this is set containers will be able to view and signal processes from other containers + in the same pod, and the first process in each container will not be assigned PID 1. + HostPID and ShareProcessNamespace cannot both be set. + Optional: Default to false. + type: boolean + subdomain: + description: |- + If specified, the fully qualified Pod hostname will be "...svc.". + If not specified, the pod will not have a domainname at all. + type: string + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + If this value is nil, the default grace period will be used instead. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. 
+ Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies + how to spread matching pods among the given + topology. 
+ properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. 
+ MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. 
+ As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + + + This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + + If this value is nil, the behavior is equivalent to the Ignore policy. 
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. 
+ type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume + in a pod that may be accessed by any container + in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure + Data Disk mount on the host and bind mount + to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host + Caching mode: None, Read Only, Read + Write.' + type: string + diskName: + description: diskName is the Name of the + data disk in the blob storage + type: string + diskURI: + description: diskURI is the URI of data + disk in the blob storage + type: string + fsType: + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are + Shared: multiple blob disks per storage + account Dedicated: single blob disk + per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure + File Service mount on the host and bind + mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: secretName is the name of + secret that contains Azure Storage Account + Name and Key + type: string + shareName: + description: shareName is the azure share + Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount + on the host that shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + path: + description: 'path is Optional: Used as + the mounted root, rather than the full + Ceph tree, default is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap + that should populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) + represents ephemeral storage that is handled + by certain external CSI drivers (Beta feature). + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. 
+ type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward + API about the pod that should populate this + volume + properties: + defaultMode: + description: |- + Optional: mode bits to use on created files by default. Must be a + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward + API volume file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. 
+ The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. + + + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. 
Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. 
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type + of resource being referenced + type: string + name: + description: Name is the name + of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. 
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type + of resource being referenced + type: string + name: + description: Name is the name + of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. 
+ + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim + references one entry in + PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label + query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the + binding reference to the PersistentVolume + backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel + resource that is attached to a kubelet's + host machine and then exposed to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. 
+ Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + lun: + description: 'lun is Optional: FC target + lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: + FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: + type: string + type: array + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the + driver to use for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this + field holds extra command options if + any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. 
+ properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker + volume attached to a kubelet's host machine. + This depends on the Flocker control service + being running + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of + the dataset. This is unique identifier + of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash + for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. 
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- + TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + mount host directories as read/write. + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines + whether support iSCSI Discovery CHAP + authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether + support iSCSI Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. 
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified + Name. + type: string + iscsiInterface: + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target + Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret + for iSCSI target and initiator authentication + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents + a PhotonController persistent disk attached + and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ type: string + pdID: + description: pdID is the ID that identifies + Photon Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx + volume attached and mounted on kubelets + host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies + a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one + resources secrets, configmaps, and downward + API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume + projections + items: + description: Projection that may be + projected along with other supported + volume types + properties: + configMap: + description: configMap information + about the configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. 
If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string + key to a path within a volume. + properties: + key: + description: key is the + key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: optional specify + whether the ConfigMap or its + keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information + about the downwardAPI data to + project + properties: + items: + description: Items is a list + of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile + represents information to + create the file containing + the pod field + properties: + fieldRef: + description: 'Required: + Selects a field of the + pod: only annotations, + labels, name and namespace + are supported.' + properties: + apiVersion: + description: Version + of the schema the + FieldPath is written + in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path + of the field to + select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: + Path is the relative + path name of the file + to be created. Must + not be absolute or contain + the ''..'' path. Must + be utf-8 encoded. The + first item of the relative + path must not start + with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
+ properties: + containerName: + description: 'Container + name: required for + volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies + the output format + of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: + resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + secret: + description: secret information + about the secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string + key to a path within a volume. + properties: + key: + description: key is the + key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. 
+ May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional field + specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken + is information about the serviceAccountToken + data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. 
+ type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte + mount on the host that shares a pod's lifetime + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by + name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + image: + description: |- + image is the rados image name. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + pool: + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO + persistent volume attached and mounted on + Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address + of the ScaleIO API Gateway. 
+ type: string + protectionDomain: + description: protectionDomain is the name + of the ScaleIO Protection Domain for + the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable + SSL communication with Gateway, default + false + type: boolean + storageMode: + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO + Storage Pool associated with the protection + domain. + type: string + system: + description: system is the name of the + storage system as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether + the Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: storageOS represents a StorageOS + volume attached and mounted on Kubernetes + nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere + volume attached and mounted on kubelets + host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. 
+ Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage + Policy Based Management (SPBM) profile + ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the + storage Policy Based Management (SPBM) + profile name. + type: string + volumePath: + description: volumePath is the path that + identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - containers + type: object + type: object + type: object + description: |- + MPIReplicaSpecs contains maps from `MPIReplicaType` to `ReplicaSpec` that + specify the MPI replicas to run. + type: object + runPolicy: + description: RunPolicy encapsulates various runtime policies of + the job. + properties: + activeDeadlineSeconds: + description: |- + Specifies the duration in seconds relative to the startTime that the job may be active + before the system tries to terminate it; value must be positive integer. + format: int64 + type: integer + backoffLimit: + description: Optional number of retries before marking this + job failed. + format: int32 + type: integer + cleanPodPolicy: + description: |- + CleanPodPolicy defines the policy to kill pods after the job completes. + Default to Running. + type: string + schedulingPolicy: + description: SchedulingPolicy defines the policy related to + scheduling, e.g. gang-scheduling + properties: + minAvailable: + description: |- + MinAvailable defines the minimal number of member to run the PodGroup. + If the gang-scheduling is set to the volcano, + input is passed to `.spec.mimMember` in PodGroup for the volcano. + When using this field, you need to make sure the application supports resizing (e.g., Elastic Horovod). 
+ + + If not set, it defaults to the number of workers. + format: int32 + type: integer + minResources: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + MinResources defines the minimal resources of members to run the PodGroup. + If the gang-scheduling is set to the volcano, + input is passed to `.spec.mimResources` in PodGroup for volcano. + type: object + priorityClass: + description: |- + PriorityClass defines the PodGroup's PriorityClass. + If the gang-scheduling is set to the volcano, + input is passed to `.spec.priorityClassName` in PodGroup for volcano. + type: string + queue: + description: |- + Queue defines the queue name to allocate resource for PodGroup. + If the gang-scheduling is set to the volcano, + input is passed to `.spec.queue` in PodGroup for the volcano. + type: string + scheduleTimeoutSeconds: + description: |- + SchedulerTimeoutSeconds defines the maximal time of members to wait before run the PodGroup. + Currently, this parameter isn't respected in any case. + TODO (tenzen-y): Modify comments when supporting scheduler-plugins. + format: int32 + type: integer + type: object + suspend: + default: false + description: |- + suspend specifies whether the MPIJob controller should create Pods or not. + If a MPIJob is created with suspend set to true, no Pods are created by + the MPIJob controller. If a MPIJob is suspended after creation (i.e. the + flag goes from false to true), the MPIJob controller will delete all + active Pods and PodGroups associated with this MPIJob. Also, it will suspend the + Launcher Job. Users must design their workload to gracefully handle this. + Suspending a Job will reset the StartTime field of the MPIJob. + + + Defaults to false. 
+ type: boolean + ttlSecondsAfterFinished: + description: |- + TTLSecondsAfterFinished is the TTL to clean up jobs. + It may take extra ReconcilePeriod seconds for the cleanup, since + reconcile gets called periodically. + Default to infinite. + format: int32 + type: integer + type: object + slotsPerWorker: + default: 1 + description: |- + Specifies the number of slots per worker used in hostfile. + Defaults to 1. + format: int32 + type: integer + sshAuthMountPath: + default: /root/.ssh + description: |- + SSHAuthMountPath is the directory where SSH keys are mounted. + Defaults to "/root/.ssh". + type: string + required: + - mpiReplicaSpecs + type: object + numPorts: + description: |- + Number of ports to open for communication with the user container. These ports are opened on + the targeted NNF nodes and can be accessed outside of the k8s cluster (e.g. compute nodes). + The requested ports are made available as environment variables inside the container and in + the DWS workflow (NNF_CONTAINER_PORTS). + format: int32 + type: integer + pinned: + default: false + description: Pinned is true if this instance is an immutable copy + type: boolean + postRunTimeoutSeconds: + default: 300 + description: |- + Containers are expected to complete in the PostRun State. Allow this many seconds for the + containers to exit before declaring an error the workflow. + Defaults to 300 if not set. A value of 0 disables this behavior. + format: int64 + minimum: 0 + type: integer + preRunTimeoutSeconds: + default: 300 + description: |- + Containers are launched in the PreRun state. Allow this many seconds for the containers to + start before declaring an error to the workflow. + Defaults to 300 if not set. A value of 0 disables this behavior. + format: int64 + minimum: 0 + type: integer + retryLimit: + default: 6 + description: |- + Specifies the number of times a container will be retried upon a failure. A new pod is + deployed on each retry. 
Defaults to 6 by kubernetes itself and must be set. A value of 0 + disables retries. + format: int32 + minimum: 0 + type: integer + spec: + description: |- + Spec to define the containers created from this profile. This is used for non-MPI containers. + Refer to the K8s documentation for `PodSpec` for more definition: + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec + Either this or MPISpec must be provided, but not both. + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for + the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with + the corresponding weight. 
+ properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. 
avoid putting this pod in the same node, zone, etc. + as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. 
+ The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether a + service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want to + run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of a + set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. 
+ More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. 
Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. 
+ properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. 
Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. 
+ Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in + a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port + to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. 
+ + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in + PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. 
+ Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. 
+ Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. 
+ If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. 
+ Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. 
If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a raw + block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the + container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. 
+ items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. 
+ items: + type: string + type: array + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver options + of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. 
Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. 
+ Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. 
+ items: + description: EnvFromSource represents the source of a + set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. 
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. 
There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. 
+ properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port in + a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port + to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. 
Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. 
+ Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. 
Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in + PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. 
+ This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. 
+ type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. 
+ type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + targetContainerName: + description: |- + If set, the name of the container from PodSpec that this ephemeral container targets. + The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + If not set then the ephemeral container uses the namespaces configured in the Pod spec. + + + The container runtime must implement support for this feature. If the runtime does not + support namespace targeting then the result of setting this field is undefined. + type: string + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. 
+ type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a raw + block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the + container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. 
+ type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. This is only valid for non-hostNetwork pods. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. + type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. 
+ type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. Setting false is useful for + mitigating container breakout vulnerabilities even allowing users to run their + containers as root without actually having root privileges on the host. + This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. + type: boolean + hostname: + description: |- + Specifies the hostname of the Pod + If not specified, the pod's hostname will be set to a system-defined value. + type: string + imagePullSecrets: + description: |- + ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + If specified, these secrets will be passed to individual puller implementations for them to use. + More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + type: array + initContainers: + description: |- + List of initialization containers belonging to the pod. + Init containers are executed in order prior to containers being started. If any + init container fails, the pod is considered to have failed and is handled according + to its restartPolicy. 
The name for an init container or normal container must be + unique among all containers. + Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. + The resourceRequirements of an init container are taken into account during scheduling + by finding the highest request/limit for each resource type, and then using the max of + of that value or the sum of the normal containers. Limits are applied to init containers + in a similar fashion. + Init containers cannot currently be added or removed. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + items: + description: A single application container that you want to + run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of a + set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. 
+ More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. 
Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. 
+ properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. 
Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. 
+ Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in + a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port + to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. 
+ + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in + PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. 
+ Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. 
+ Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. 
+ If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. 
+ Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. 
If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a raw + block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the + container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. 
+ items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + nodeName: + description: |- + NodeName is a request to schedule this pod onto a specific node. If it is non-empty, + the scheduler simply schedules this pod onto that node, assuming that it fits resource + requirements. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. 
+ Selector which must match a node's labels for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + os: + description: |- + Specifies the OS of the containers in the pod. + Some pod and container fields are restricted if this is set. + + + If the OS field is set to linux, the following fields must be unset: + -securityContext.windowsOptions + + + If the OS field is set to windows, following fields must be unset: + - spec.hostPID + - spec.hostIPC + - spec.hostUsers + - spec.securityContext.seLinuxOptions + - spec.securityContext.seccompProfile + - spec.securityContext.fsGroup + - spec.securityContext.fsGroupChangePolicy + - spec.securityContext.sysctls + - spec.shareProcessNamespace + - spec.securityContext.runAsUser + - spec.securityContext.runAsGroup + - spec.securityContext.supplementalGroups + - spec.containers[*].securityContext.seLinuxOptions + - spec.containers[*].securityContext.seccompProfile + - spec.containers[*].securityContext.capabilities + - spec.containers[*].securityContext.readOnlyRootFilesystem + - spec.containers[*].securityContext.privileged + - spec.containers[*].securityContext.allowPrivilegeEscalation + - spec.containers[*].securityContext.procMount + - spec.containers[*].securityContext.runAsUser + - spec.containers[*].securityContext.runAsGroup + properties: + name: + description: |- + Name is the name of the operating system. The currently supported values are linux and windows. 
+ Additional value may be defined in future and can be one of: + https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + Clients should expect to handle additional values and treat unrecognized values in this field as os: null + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + This field will be autopopulated at admission time by the RuntimeClass admission controller. If + the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + The RuntimeClass admission controller will reject Pod create requests which have the overhead already + set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + type: object + preemptionPolicy: + description: |- + PreemptionPolicy is the Policy for preempting pods with lower priority. + One of Never, PreemptLowerPriority. + Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: |- + The priority value. Various system components use this field to find the + priority of the pod. When Priority Admission Controller is enabled, it + prevents users from setting this field. The admission controller populates + this field from PriorityClassName. + The higher the value, the higher the priority. + format: int32 + type: integer + priorityClassName: + description: |- + If specified, indicates the pod's priority. 
"system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name. + If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to a pod + condition + properties: + conditionType: + description: ConditionType refers to a condition in the + pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim through a ClaimSource. + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + source: + description: Source describes where to find the ResourceClaim. + properties: + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. 
+ type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
+ If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + + + This is a beta feature enabled by the PodSchedulingReadiness feature gate. + items: + description: PodSchedulingGate is associated to a Pod to guard + its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. + Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. + properties: + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. 
+ type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. 
+ type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. 
+ items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. 
+ type: string + serviceAccountName: + description: |- + ServiceAccountName is the name of the ServiceAccount to use to run this pod. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + type: string + setHostnameAsFQDN: + description: |- + If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). + In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). + In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. + If a pod does not have FQDN, this has no effect. + Default to false. + type: boolean + shareProcessNamespace: + description: |- + Share a single process namespace between all of the containers in a pod. + When this is set containers will be able to view and signal processes from other containers + in the same pod, and the first process in each container will not be assigned PID 1. + HostPID and ShareProcessNamespace cannot both be set. + Optional: Default to false. + type: boolean + subdomain: + description: |- + If specified, the fully qualified Pod hostname will be "...svc.". + If not specified, the pod will not have a domainname at all. + type: string + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + If this value is nil, the default grace period will be used instead. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. 
+ Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. 
+ properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. 
+ MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. 
+ As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + + + This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + + If this value is nil, the behavior is equivalent to the Ignore policy. 
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. 
+ type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount + on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in + the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the + blob storage + type: string + fsType: + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure managed + data disk (only in managed availability set). defaults + to shared' + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service + mount on the host and bind mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host + that shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers (Beta feature). + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. 
+ type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the + pod that should populate this volume + properties: + defaultMode: + description: |- + Optional: mode bits to use on created files by default. Must be a + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name and namespace + are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. 
+ The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. + + + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. 
Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. 
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. 
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. 
+ + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references + one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that + is attached to a kubelet's host machine and then exposed + to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. 
+ Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: + type: string + type: array + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use + for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. 
+ properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached + to a kubelet's host machine. This depends on the Flocker + control service being running + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. 
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- + TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + mount host directories as read/write. + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. 
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host + machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along + with other supported volume types + properties: + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. 
If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: optional specify whether the + ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the + downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file to + be created. Must not be absolute or + contain the ''..'' path. Must be utf-8 + encoded. The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env + vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. 
+ May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. 
+ type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + image: + description: |- + image is the rados image name. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + pool: + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. 
+ type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool + associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret + or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. 
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - containers + type: object + storages: + description: List of possible filesystems supported by this container + profile + items: + description: |- + NnfContainerProfileStorage defines the mount point information that will be available to the + container + properties: + name: + description: 'Name specifies the name of the mounted filesystem; + must match the user supplied #DW directive' + type: string + optional: + default: false + description: |- + Optional designates that this filesystem is available to be mounted, but can be ignored by + the user not supplying this filesystem in the #DW directives + type: boolean + pvcMode: + description: |- + For DW_GLOBAL_ (global lustre) storages, the access mode must match what is configured in + the LustreFilesystem resource for the namespace. Defaults to `ReadWriteMany` for global + lustre, otherwise empty. + type: string + required: + - name + - optional + type: object + type: array + userID: + description: |- + UserID specifies the user ID that is allowed to use this profile. If this is specified, only + Workflows that have a matching user ID can select this profile. + format: int32 + type: integer + required: + - retryLimit + type: object + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + required: + - data + type: object + served: true storage: true diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementmanagers.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementmanagers.yaml index dc6b2b94..6a3503f1 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementmanagers.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementmanagers.yaml @@ -22145,6 +22145,7384 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: True if manager readied all resoures + jsonPath: .status.ready + name: READY + type: boolean + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha4 + schema: + openAPIV3Schema: + description: NnfDataMovementManager is the Schema for the nnfdatamovementmanagers + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NnfDataMovementManagerSpec defines the desired state of NnfDataMovementManager + properties: + hostPath: + description: Host Path defines the directory location of shared mounts + on an individual worker node. 
+ type: string + mountPath: + description: Mount Path defines the location within the container + at which the Host Path volume should be mounted. + type: string + selector: + description: |- + Selector defines the pod selector used in scheduling the worker nodes. This value is duplicated + to the template.spec.metadata.labels to satisfy the requirements of the worker's Daemon Set. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + template: + description: |- + Template defines the pod template that is used for the basis of the worker Daemon Set that + manages the per node data movement operations. + properties: + metadata: + description: |- + Standard object's metadata. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + type: object + spec: + description: |- + Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. 
+ items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of + resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. 
avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of + resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. 
+ The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. 
+ More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. 
Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. 
+ properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. 
Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. 
+ Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. 
+ + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. 
+ Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. 
+ Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. 
+ If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. 
+ Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. 
If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. 
+ items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. 
+ items: + type: string + type: array + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. 
Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. 
+ Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. 
+ items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. 
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. 
TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. 
+ properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. 
Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. 
You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. 
+ Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. 
Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. 
+ This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. 
+ type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. 
+ type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + targetContainerName: + description: |- + If set, the name of the container from PodSpec that this ephemeral container targets. + The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + If not set then the ephemeral container uses the namespaces configured in the Pod spec. + + + The container runtime must implement support for this feature. If the runtime does not + support namespace targeting then the result of setting this field is undefined. + type: string + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. 
+ type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. 
+ type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. This is only valid for non-hostNetwork pods. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. + type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. 
+ type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. Setting false is useful for + mitigating container breakout vulnerabilities even allowing users to run their + containers as root without actually having root privileges on the host. + This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. + type: boolean + hostname: + description: |- + Specifies the hostname of the Pod + If not specified, the pod's hostname will be set to a system-defined value. + type: string + imagePullSecrets: + description: |- + ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + If specified, these secrets will be passed to individual puller implementations for them to use. + More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + type: array + initContainers: + description: |- + List of initialization containers belonging to the pod. + Init containers are executed in order prior to containers being started. If any + init container fails, the pod is considered to have failed and is handled according + to its restartPolicy. 
The name for an init container or normal container must be + unique among all containers. + Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. + The resourceRequirements of an init container are taken into account during scheduling + by finding the highest request/limit for each resource type, and then using the max of + of that value or the sum of the normal containers. Limits are applied to init containers + in a similar fashion. + Init containers cannot currently be added or removed. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. 
+ More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. 
Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. 
+ properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. 
Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. 
+ Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. 
+ + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. 
+ Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. 
+ Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. 
+ If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. 
+ Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. 
If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. 
+ items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + nodeName: + description: |- + NodeName is a request to schedule this pod onto a specific node. If it is non-empty, + the scheduler simply schedules this pod onto that node, assuming that it fits resource + requirements. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. 
+ Selector which must match a node's labels for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + os: + description: |- + Specifies the OS of the containers in the pod. + Some pod and container fields are restricted if this is set. + + + If the OS field is set to linux, the following fields must be unset: + -securityContext.windowsOptions + + + If the OS field is set to windows, following fields must be unset: + - spec.hostPID + - spec.hostIPC + - spec.hostUsers + - spec.securityContext.seLinuxOptions + - spec.securityContext.seccompProfile + - spec.securityContext.fsGroup + - spec.securityContext.fsGroupChangePolicy + - spec.securityContext.sysctls + - spec.shareProcessNamespace + - spec.securityContext.runAsUser + - spec.securityContext.runAsGroup + - spec.securityContext.supplementalGroups + - spec.containers[*].securityContext.seLinuxOptions + - spec.containers[*].securityContext.seccompProfile + - spec.containers[*].securityContext.capabilities + - spec.containers[*].securityContext.readOnlyRootFilesystem + - spec.containers[*].securityContext.privileged + - spec.containers[*].securityContext.allowPrivilegeEscalation + - spec.containers[*].securityContext.procMount + - spec.containers[*].securityContext.runAsUser + - spec.containers[*].securityContext.runAsGroup + properties: + name: + description: |- + Name is the name of the operating system. The currently supported values are linux and windows. 
+ Additional value may be defined in future and can be one of: + https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + Clients should expect to handle additional values and treat unrecognized values in this field as os: null + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + This field will be autopopulated at admission time by the RuntimeClass admission controller. If + the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + The RuntimeClass admission controller will reject Pod create requests which have the overhead already + set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + type: object + preemptionPolicy: + description: |- + PreemptionPolicy is the Policy for preempting pods with lower priority. + One of Never, PreemptLowerPriority. + Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: |- + The priority value. Various system components use this field to find the + priority of the pod. When Priority Admission Controller is enabled, it + prevents users from setting this field. The admission controller populates + this field from PriorityClassName. + The higher the value, the higher the priority. + format: int32 + type: integer + priorityClassName: + description: |- + If specified, indicates the pod's priority. 
"system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name. + If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim through a ClaimSource. + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + source: + description: Source describes where to find the ResourceClaim. + properties: + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. 
+ type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
+ If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + + + This is a beta feature enabled by the PodSchedulingReadiness feature gate. + items: + description: PodSchedulingGate is associated to a Pod to + guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. + Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. + properties: + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. 
+ type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. 
+ type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. 
+ items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. 
+ type: string + serviceAccountName: + description: |- + ServiceAccountName is the name of the ServiceAccount to use to run this pod. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + type: string + setHostnameAsFQDN: + description: |- + If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). + In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). + In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. + If a pod does not have FQDN, this has no effect. + Default to false. + type: boolean + shareProcessNamespace: + description: |- + Share a single process namespace between all of the containers in a pod. + When this is set containers will be able to view and signal processes from other containers + in the same pod, and the first process in each container will not be assigned PID 1. + HostPID and ShareProcessNamespace cannot both be set. + Optional: Default to false. + type: boolean + subdomain: + description: |- + If specified, the fully qualified Pod hostname will be "...svc.". + If not specified, the pod will not have a domainname at all. + type: string + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + If this value is nil, the default grace period will be used instead. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. 
+ Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. 
+ properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. 
+ MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. 
+ As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + + + This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + + If this value is nil, the behavior is equivalent to the Ignore policy. 
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. 
+ type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk + mount on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service + mount on the host and bind mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the + host that shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers (Beta feature). + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. 
+ type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about + the pod that should populate this volume + properties: + defaultMode: + description: |- + Optional: mode bits to use on created files by default. Must be a + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name + and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. 
+ The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. + + + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. 
Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. 
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. 
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. 
+ + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references + one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource + that is attached to a kubelet's host machine and then + exposed to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. 
+ Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target + worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: + type: string + type: array + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to + use for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. 
+ properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached + to a kubelet's host machine. This depends on the Flocker + control service being running + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. 
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- + TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + mount host directories as read/write. + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. 
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host + machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected + along with other supported volume types + properties: + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. 
If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
+ properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. 
+ May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. 
+ type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the + host that shares a pod's lifetime + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + image: + description: |- + image is the rados image name. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + pool: + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent + volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. 
+ type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. 
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - containers + type: object + type: object + updateStrategy: + description: |- + UpdateStrategy defines the UpdateStrategy that is used for the basis of the worker Daemon Set + that manages the per node data movement operations. + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if type = "RollingUpdate". + --- + TODO: Update this to follow our convention for oneOf, whatever we decide it + to be. Same as Deployment `strategy.rollingUpdate`. + See https://github.com/kubernetes/kubernetes/issues/35345 + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of nodes with an existing available DaemonSet pod that + can have an updated DaemonSet pod during during an update. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + This can not be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up to a minimum of 1. + Default value is 0. + Example: when this is set to 30%, at most 30% of the total number of nodes + that should be running the daemon pod (i.e. status.desiredNumberScheduled) + can have their a new pod created before the old pod is marked as deleted. + The update starts by launching new pods on 30% of nodes. 
Once an updated + pod is available (Ready for at least minReadySeconds) the old DaemonSet pod + on that node is marked deleted. If the old pod becomes unavailable for any + reason (Ready transitions to false, is evicted, or is drained) an updated + pod is immediatedly created on that node without considering surge limits. + Allowing surge implies the possibility that the resources consumed by the + daemonset on any given node can double if the readiness check fails, and + so resource intensive daemonsets should take into account that they may + cause evictions during disruption. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of DaemonSet pods that can be unavailable during the + update. Value can be an absolute number (ex: 5) or a percentage of total + number of DaemonSet pods at the start of the update (ex: 10%). Absolute + number is calculated from percentage by rounding up. + This cannot be 0 if MaxSurge is 0 + Default value is 1. + Example: when this is set to 30%, at most 30% of the total number of nodes + that should be running the daemon pod (i.e. status.desiredNumberScheduled) + can have their pods stopped for an update at any given time. The update + starts by stopping at most 30% of those DaemonSet pods and then brings + up new DaemonSet pods in their place. Once the new pods are available, + it then proceeds onto other DaemonSet pods, thus ensuring that at least + 70% of original number of DaemonSet pods are available at all times during + the update. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of daemon set update. Can be "RollingUpdate" + or "OnDelete". Default is RollingUpdate. 
+ type: string + type: object + required: + - hostPath + - mountPath + - selector + - template + - updateStrategy + type: object + status: + description: NnfDataMovementManagerStatus defines the observed state of + NnfDataMovementManager + properties: + ready: + default: false + description: |- + Ready indicates that the Data Movement Manager has achieved the desired readiness state + and all managed resources are initialized. + type: boolean + required: + - ready + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementprofiles.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementprofiles.yaml index 1f99736c..b7ba3232 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementprofiles.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementprofiles.yaml @@ -390,5 +390,131 @@ spec: type: object type: object served: true + storage: false + subresources: {} + - additionalPrinterColumns: + - description: True if this is the default instance + jsonPath: .data.default + name: DEFAULT + type: boolean + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha4 + schema: + openAPIV3Schema: + description: NnfDataMovementProfile is the Schema for the nnfdatamovementprofiles + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + data: + description: NnfDataMovementProfileData defines the desired state of NnfDataMovementProfile + properties: + command: + default: ulimit -n 2048 && mpirun --allow-run-as-root --hostfile $HOSTFILE + dcp --progress 1 --uid $UID --gid $GID $SRC $DEST + description: |- + Command to execute to perform data movement. 
$VARS are replaced by the nnf software and must + be present in the command. + Available $VARS: + HOSTFILE: hostfile that is created and used for mpirun. Contains a list of hosts and the + slots/max_slots for each host. This hostfile is created at `/tmp//hostfile` + UID: User ID that is inherited from the Workflow + GID: Group ID that is inherited from the Workflow + SRC: source for the data movement + DEST destination for the data movement + type: string + createDestDir: + default: true + description: |- + CreateDestDir will ensure that the destination directory exists before performing data + movement. This will cause a number of stat commands to determine the source and destination + file types, so that the correct pathing for the destination can be determined. Then, a mkdir + is issued. + type: boolean + default: + default: false + description: Default is true if this instance is the default resource + to use + type: boolean + logStdout: + default: false + description: |- + If true, enable the command's stdout to be saved in the log when the command completes + successfully. On failure, the output is always logged. + type: boolean + maxSlots: + default: 0 + description: |- + MaxSlots is the number of max_slots specified in the MPI hostfile. A value of 0 disables the + use of max_slots in the hostfile. The hostfile is used for both `statCommand` and `Command`. + minimum: 0 + type: integer + pinned: + default: false + description: Pinned is true if this instance is an immutable copy + type: boolean + progressIntervalSeconds: + default: 5 + description: |- + NnfDataMovement resources have the ability to collect and store the progress percentage and the + last few lines of output in the CommandStatus field. This number is used for the interval to collect + the progress data. `dcp --progress N` must be included in the data movement command in order for + progress to be collected. A value of 0 disables this functionality. 
+ minimum: 0 + type: integer + slots: + default: 8 + description: |- + Slots is the number of slots specified in the MPI hostfile. A value of 0 disables the use of + slots in the hostfile. The hostfile is used for both `statCommand` and `Command`. + minimum: 0 + type: integer + statCommand: + default: mpirun --allow-run-as-root -np 1 --hostfile $HOSTFILE -- + setpriv --euid $UID --egid $GID --clear-groups stat --cached never + -c '%F' $PATH + description: |- + If CreateDestDir is true, then use StatCommand to perform the stat commands. + Use setpriv to stat the path with the specified UID/GID. + Available $VARS: + HOSTFILE: hostfile that is created and used for mpirun. Contains a list of hosts and the + slots/max_slots for each host. This hostfile is created at + `/tmp//hostfile`. This is the same hostfile used as the one for Command. + UID: User ID that is inherited from the Workflow + GID: Group ID that is inherited from the Workflow + PATH: Path to stat + type: string + storeStdout: + default: false + description: |- + Similar to logStdout, store the command's stdout in Status.Message when the command completes + successfully. On failure, the output is always stored. + type: boolean + required: + - command + - createDestDir + - maxSlots + - slots + - statCommand + type: object + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + type: object + served: true storage: true subresources: {} diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfdatamovements.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfdatamovements.yaml index 50bed9b1..b4d185c6 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfdatamovements.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfdatamovements.yaml @@ -1229,6 +1229,412 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: Current state + jsonPath: .status.state + name: STATE + type: string + - description: Status of current state + jsonPath: .status.status + name: STATUS + type: string + - jsonPath: .status.error.severity + name: ERROR + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha4 + schema: + openAPIV3Schema: + description: NnfDataMovement is the Schema for the nnfdatamovements API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NnfDataMovementSpec defines the desired state of NnfDataMovement + properties: + cancel: + default: false + description: Set to true if the data movement operation should be + canceled. + type: boolean + destination: + description: Destination describes the destination of the data movement + operation + properties: + path: + description: Path describes the location of the user data relative + to the storage instance + type: string + storageReference: + description: |- + Storage describes the storage backing this data movement specification; Storage can reference + either NNF storage or global Lustre storage depending on the object references Kind field. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + type: object + groupId: + description: |- + Group Id specifies the group ID for the data movement operation. This value is used + in conjunction with the user ID to ensure the user has valid permissions to perform + the data movement operation. + format: int32 + type: integer + profileReference: + description: |- + ProfileReference is an object reference to an NnfDataMovementProfile that is used to + configure data movement. If empty, the default profile is used. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. 
+ type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + source: + description: Source describes the source of the data movement operation + properties: + path: + description: Path describes the location of the user data relative + to the storage instance + type: string + storageReference: + description: |- + Storage describes the storage backing this data movement specification; Storage can reference + either NNF storage or global Lustre storage depending on the object references Kind field. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
+ For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + type: object + userConfig: + description: |- + User defined configuration on how data movement should be performed. This overrides the + configuration defined in the supplied ProfileReference/NnfDataMovementProfile. These values + are typically set by the Copy Offload API. + properties: + dcpOptions: + description: Extra options to pass to the dcp command (used to + perform data movement). + type: string + dryrun: + default: false + description: |- + Fake the Data Movement operation. 
The system "performs" Data Movement but the command to do so + is trivial. This means a Data Movement request is still submitted but the IO is skipped. + type: boolean + logStdout: + default: false + description: |- + If true, enable the command's stdout to be saved in the log when the command completes + successfully. On failure, the output is always logged. + Note: Enabling this option may degrade performance. + type: boolean + maxSlots: + description: |- + The number of max_slots specified in the MPI hostfile. A value of 0 disables the use of slots + in the hostfile. Nil will defer to the value specified in the NnfDataMovementProfile. + type: integer + mpirunOptions: + description: Extra options to pass to the mpirun command (used + to perform data movement). + type: string + slots: + description: |- + The number of slots specified in the MPI hostfile. A value of 0 disables the use of slots in + the hostfile. Nil will defer to the value specified in the NnfDataMovementProfile. + type: integer + storeStdout: + default: false + description: |- + Similar to LogStdout, store the command's stdout in Status.Message when the command completes + successfully. On failure, the output is always stored. + Note: Enabling this option may degrade performance. + type: boolean + type: object + userId: + description: |- + User Id specifies the user ID for the data movement operation. This value is used + in conjunction with the group ID to ensure the user has valid permissions to perform + the data movement operation. + format: int32 + type: integer + type: object + status: + description: NnfDataMovementStatus defines the observed state of NnfDataMovement + properties: + commandStatus: + description: |- + CommandStatus reflects the current status of the underlying Data Movement command + as it executes. The command status is polled at a certain frequency to avoid excessive + updates to the Data Movement resource. 
+ properties: + command: + description: The command that was executed during data movement. + type: string + data: + description: |- + Data is parsed from the dcp output when the command is finished. This is the total amount of + data copied by dcp. + type: string + directories: + description: |- + Directories is parsed from the dcp output when the command is finished. This is the number of + directories that dcp copied. Note: This value may be inflated due to NNF index mount + directories when copying from XFS or GFS2 filesystems. + format: int32 + type: integer + elapsedTime: + description: ElapsedTime reflects the elapsed time since the underlying + data movement command started. + type: string + files: + description: |- + Files is parsed from the dcp output when the command is finished. This is the number of files + that dcp copied. + format: int32 + type: integer + items: + description: |- + Items is parsed from the dcp output when the command is finished. This is a total of + the number of directories, files, and links that dcp copied. + format: int32 + type: integer + lastMessage: + description: |- + LastMessage reflects the last message received over standard output or standard error as + captured by the underlying data movement command. + type: string + lastMessageTime: + description: |- + LastMessageTime reflects the time at which the last message was received over standard output + or standard error by the underlying data movement command. + format: date-time + type: string + links: + description: |- + Links is parsed from the dcp output when the command is finished. This is the number of links + that dcp copied. + format: int32 + type: integer + progress: + description: |- + ProgressPercentage refects the progress of the underlying data movement command as captured from + standard output. A best effort is made to parse the command output as a percentage. If no + progress has yet to be measured than this field is omitted. 
If the latest command output does + not contain a valid percentage, then the value is unchanged from the previously parsed value. + format: int32 + type: integer + rate: + description: |- + Rate is parsed from the dcp output when the command is finished. This is transfer rate of the + data copied by dcp. + type: string + seconds: + description: Seconds is parsed from the dcp output when the command + is finished. + type: string + type: object + endTime: + description: EndTime reflects the time at which the Data Movement + operation ended. + format: date-time + type: string + error: + description: Error information + properties: + debugMessage: + description: Internal debug message for the error + type: string + severity: + description: |- + Indication of how severe the error is. Minor will likely succeed, Major may + succeed, and Fatal will never succeed. + enum: + - Minor + - Major + - Fatal + type: string + type: + description: Internal or user error + enum: + - Internal + - User + - WLM + type: string + userMessage: + description: Optional user facing message if the error is relevant + to an end user + type: string + required: + - debugMessage + - severity + - type + type: object + message: + description: |- + Message contains any text that explains the Status. If Data Movement failed or storeStdout is + enabled, this will contain the command's output. + type: string + restarts: + description: Restarts contains the number of restarts of the Data + Movement operation. + type: integer + startTime: + description: StartTime reflects the time at which the Data Movement + operation started. + format: date-time + type: string + state: + description: Current state of data movement. + enum: + - Starting + - Running + - Finished + type: string + status: + description: Status of the current state. 
+ enum: + - Success + - Failed + - Invalid + - Cancelled + type: string + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/config/crd/bases/nnf.cray.hpe.com_nnflustremgts.yaml b/config/crd/bases/nnf.cray.hpe.com_nnflustremgts.yaml index 93b1fc13..98cc6710 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnflustremgts.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnflustremgts.yaml @@ -738,6 +738,277 @@ spec: will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. + Instead of using this type, create a locally provided and used type that is well-focused on your reference. + For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + type: object + type: array + error: + description: Error information + properties: + debugMessage: + description: Internal debug message for the error + type: string + severity: + description: |- + Indication of how severe the error is. Minor will likely succeed, Major may + succeed, and Fatal will never succeed. + enum: + - Minor + - Major + - Fatal + type: string + type: + description: Internal or user error + enum: + - Internal + - User + - WLM + type: string + userMessage: + description: Optional user facing message if the error is relevant + to an end user + type: string + required: + - debugMessage + - severity + - type + type: object + fsNameNext: + description: FsNameNext is the next available fsname that hasn't been + used + maxLength: 8 + minLength: 8 + type: string + type: object + type: object + served: true + storage: false + subresources: + status: {} + - name: v1alpha4 + schema: + openAPIV3Schema: + description: NnfLustreMGT is the Schema for the nnfstorageprofiles API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NnfLustreMGTSpec defines the desired state of NnfLustreMGT + properties: + addresses: + description: Addresses is the list of LNet addresses for the MGT + items: + type: string + type: array + claimList: + description: ClaimList is the list of currently in use fsnames + items: + description: |- + ObjectReference contains enough information to let you inspect or modify the referred object. + --- + New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. + 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. + 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular + restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". + Those cannot be well described when embedded. + 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. + 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity + during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple + and the version of the actual struct is irrelevant. + 5. We cannot easily change it. 
Because this type is embedded in many locations, updates to this type + will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. + + + Instead of using this type, create a locally provided and used type that is well-focused on your reference. + For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + type: array + fsNameBlackList: + description: |- + FsNameBlackList is a list of fsnames that can't be used. This may be + necessary if the MGT hosts file systems external to Rabbit + items: + type: string + type: array + fsNameStart: + description: FsNameStart is the starting fsname to be used + maxLength: 8 + minLength: 8 + type: string + fsNameStartReference: + description: |- + FsNameStartReference can be used to add a configmap where the starting fsname is + stored. If this reference is set, it takes precendence over FsNameStart. The configmap + will be updated with the next available fsname anytime an fsname is used. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + required: + - addresses + type: object + status: + description: NnfLustreMGTStatus defines the current state of NnfLustreMGT + properties: + claimList: + description: ClaimList is the list of currently in use fsnames + items: + properties: + fsname: + type: string + reference: + description: |- + ObjectReference contains enough information to let you inspect or modify the referred object. + --- + New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. + 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. + 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular + restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". + Those cannot be well described when embedded. + 3. Inconsistent validation. 
Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. + 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity + during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple + and the version of the actual struct is irrelevant. + 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type + will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. + + Instead of using this type, create a locally provided and used type that is well-focused on your reference. For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . properties: diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfnodeblockstorages.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfnodeblockstorages.yaml index a0b5b6a9..fa2c7460 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfnodeblockstorages.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfnodeblockstorages.yaml @@ -500,6 +500,169 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.ready + name: READY + type: string + - jsonPath: .status.error.severity + name: ERROR + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha4 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + NnfNodeBlockStorageSpec defines the desired storage attributes on a NNF Node. + Storage spec are created on request of the user and fulfilled by the NNF Node Controller. + properties: + allocations: + description: Allocations is the list of storage allocations to make + items: + properties: + access: + description: List of nodes where /dev devices should be created + items: + type: string + type: array + capacity: + description: Aggregate capacity of the block devices for each + allocation + format: int64 + type: integer + type: object + type: array + sharedAllocation: + description: SharedAllocation is used when a single NnfNodeBlockStorage + allocation is used by multiple NnfNodeStorage allocations + type: boolean + required: + - sharedAllocation + type: object + status: + properties: + allocations: + description: Allocations is the list of storage allocations that were + made + items: + properties: + accesses: + additionalProperties: + properties: + devicePaths: + description: /dev paths for each of the block devices + items: + type: string + type: array + storageGroupId: + description: Redfish ID for the storage group + type: string + type: object + description: Accesses is a map of node name to the access status + type: object + capacityAllocated: + description: |- + Total capacity allocated for the storage.
This may differ from the requested storage + capacity as the system may round up to the requested capacity to satisfy underlying + storage requirements (i.e. block size / stripe size). + format: int64 + type: integer + devices: + description: List of NVMe namespaces used by this allocation + items: + properties: + NQN: + description: NQN of the base NVMe device + type: string + capacityAllocated: + description: |- + Total capacity allocated for the storage. This may differ from the requested storage + capacity as the system may round up to the requested capacity to satisfy underlying + storage requirements (i.e. block size / stripe size). + format: int64 + type: integer + namespaceId: + description: Id of the Namespace on the NVMe device (e.g., + "2") + type: string + required: + - NQN + - namespaceId + type: object + type: array + storagePoolId: + description: Redfish ID for the storage pool + type: string + type: object + type: array + error: + description: Error information + properties: + debugMessage: + description: Internal debug message for the error + type: string + severity: + description: |- + Indication of how severe the error is. Minor will likely succeed, Major may + succeed, and Fatal will never succeed. + enum: + - Minor + - Major + - Fatal + type: string + type: + description: Internal or user error + enum: + - Internal + - User + - WLM + type: string + userMessage: + description: Optional user facing message if the error is relevant + to an end user + type: string + required: + - debugMessage + - severity + - type + type: object + podStartTime: + description: |- + PodStartTime is the value of pod.status.containerStatuses[].state.running.startedAt from the pod that did + last successful full reconcile of the NnfNodeBlockStorage. This is used to tell whether the /dev paths + listed in the status section are from the current boot of the node.
+ format: date-time + type: string + ready: + type: boolean + required: + - ready + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfnodeecdata.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfnodeecdata.yaml index 0b39098c..28410fe3 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfnodeecdata.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfnodeecdata.yaml @@ -95,6 +95,46 @@ spec: subresources: status: {} - name: v1alpha3 + schema: + openAPIV3Schema: + description: NnfNodeECData is the Schema for the nnfnodeecdata API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NnfNodeECDataSpec defines the desired state of NnfNodeECData + type: object + status: + description: NnfNodeECDataStatus defines the observed state of NnfNodeECData + properties: + data: + additionalProperties: + additionalProperties: + type: string + type: object + type: object + type: object + type: object + served: true + storage: false + subresources: + status: {} + - name: v1alpha4 schema: openAPIV3Schema: description: NnfNodeECData is the Schema for the nnfnodeecdata API diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfnodes.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfnodes.yaml index b716ba03..c174c113 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfnodes.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfnodes.yaml @@ -491,6 +491,166 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: Current desired state + jsonPath: .spec.state + name: STATE + type: string + - description: Health of node + jsonPath: .status.health + name: HEALTH + type: string + - description: Current status of node + jsonPath: .status.status + name: STATUS + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + - description: Parent pod name + jsonPath: .spec.pod + name: POD + priority: 1 + type: string + name: v1alpha4 + schema: + openAPIV3Schema: + description: NnfNode is the Schema for the NnfNode API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NnfNodeSpec defines the desired state of NNF Node + properties: + name: + description: The unique name for this NNF Node + type: string + pod: + description: Pod name for this NNF Node + type: string + state: + description: State reflects the desired state of this NNF Node resource + enum: + - Enable + - Disable + type: string + required: + - state + type: object + status: + description: NnfNodeStatus defines the observed status of NNF Node + properties: + capacity: + format: int64 + type: integer + capacityAllocated: + format: int64 + type: integer + drives: + items: + description: NnfDriveStatus defines the observe status of drives + connected to this NNF Node + properties: + capacity: + description: |- + Capacity in bytes of the device. The full capacity may not + be usable depending on what the storage driver can provide. + format: int64 + type: integer + firmwareVersion: + description: The firmware version of this storage controller. + type: string + health: + description: NnfResourceHealthType defines the health of an + NNF resource. + type: string + id: + description: ID reflects the NNF Node unique identifier for + this NNF Server resource. + type: string + model: + description: Model is the manufacturer information about the + device + type: string + name: + description: Name reflects the common name of this NNF Server + resource. + type: string + serialNumber: + description: The serial number for this storage controller. 
+ type: string + slot: + description: Physical slot location of the storage controller. + type: string + status: + description: NnfResourceStatusType is the string that indicates + the resource's status + type: string + wearLevel: + description: WearLevel in percent for SSDs + format: int64 + type: integer + type: object + type: array + fenced: + description: Fenced is true when the NNF Node is fenced by the STONITH + agent, and false otherwise. + type: boolean + health: + description: NnfResourceHealthType defines the health of an NNF resource. + type: string + lnetNid: + description: LNetNid is the LNet address for the NNF node + type: string + servers: + items: + description: NnfServerStatus defines the observed status of servers + connected to this NNF Node + properties: + health: + description: NnfResourceHealthType defines the health of an + NNF resource. + type: string + hostname: + type: string + id: + description: ID reflects the NNF Node unique identifier for + this NNF Server resource. + type: string + name: + description: Name reflects the common name of this NNF Server + resource. 
+ type: string + status: + description: NnfResourceStatusType is the string that indicates + the resource's status + type: string + type: object + type: array + status: + description: Status reflects the current status of the NNF Node + type: string + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfnodestorages.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfnodestorages.yaml index 4e865936..9cb42005 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfnodestorages.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfnodestorages.yaml @@ -668,6 +668,225 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.ready + name: READY + type: string + - jsonPath: .status.error.severity + name: ERROR + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha4 + schema: + openAPIV3Schema: + description: NnfNodeStorage is the Schema for the NnfNodeStorage API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + NnfNodeStorageSpec defines the desired storage attributes on a NNF Node. + Storage spec are created on request of the user and fulfilled by the NNF Node Controller.
+ properties: + blockReference: + description: BlockReference is an object reference to an NnfNodeBlockStorage + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + capacity: + description: Capacity of an individual allocation + format: int64 + type: integer + count: + description: |- + Count is the number of allocations to make on this node. All of the allocations will + be created with the same parameters + minimum: 0 + type: integer + fileSystemType: + default: raw + description: |- + FileSystemType defines the type of the desired filesystem, or raw + block device. + enum: + - raw + - lvm + - zfs + - xfs + - gfs2 + - lustre + type: string + groupID: + description: Group ID for file system + format: int32 + type: integer + lustreStorage: + description: |- + LustreStorageSpec describes the Lustre target created here, if + FileSystemType specifies a Lustre target. + properties: + backFs: + description: BackFs is the type of backing filesystem to use. + enum: + - ldiskfs + - zfs + type: string + fileSystemName: + description: FileSystemName is the fsname parameter for the Lustre + filesystem. + maxLength: 8 + type: string + mgsAddress: + description: |- + MgsAddress is the NID of the MGS to use. This is used only when + creating MDT and OST targets. + type: string + startIndex: + description: |- + StartIndex is used to order a series of MDTs or OSTs. This is used only + when creating MDT and OST targets. If count in the NnfNodeStorageSpec is more + than 1, then StartIndex is the index of the first allocation, and the indexes + increment from there. + minimum: 0 + type: integer + targetType: + description: TargetType is the type of Lustre target to be created. 
+ enum: + - mgt + - mdt + - mgtmdt + - ost + type: string + type: object + sharedAllocation: + description: SharedAllocation is used when a single NnfNodeBlockStorage + allocation is used by multiple NnfNodeStorage allocations + type: boolean + userID: + description: User ID for file system + format: int32 + type: integer + required: + - count + - groupID + - sharedAllocation + - userID + type: object + status: + description: NnfNodeStorageStatus defines the status for NnfNodeStorage + properties: + allocations: + description: Allocations is the list of storage allocations that were + made + items: + description: NnfNodeStorageAllocationStatus defines the allocation + status for each allocation in the NnfNodeStorage + properties: + logicalVolume: + description: Name of the LVM LV + type: string + ready: + type: boolean + volumeGroup: + description: Name of the LVM VG + type: string + type: object + type: array + error: + description: Error information + properties: + debugMessage: + description: Internal debug message for the error + type: string + severity: + description: |- + Indication of how severe the error is. Minor will likely succeed, Major may + succeed, and Fatal will never succeed. 
+ enum: + - Minor + - Major + - Fatal + type: string + type: + description: Internal or user error + enum: + - Internal + - User + - WLM + type: string + userMessage: + description: Optional user facing message if the error is relevant + to an end user + type: string + required: + - debugMessage + - severity + - type + type: object + ready: + type: boolean + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfportmanagers.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfportmanagers.yaml index d2667e72..6db7514d 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfportmanagers.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfportmanagers.yaml @@ -722,6 +722,243 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - name: v1alpha4 + schema: + openAPIV3Schema: + description: NnfPortManager is the Schema for the nnfportmanagers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NnfPortManagerSpec defines the desired state of NnfPortManager + properties: + allocations: + description: |- + Allocations is a list of allocation requests that the Port Manager will attempt + to satisfy. 
To request port resources from the port manager, clients should add + an entry to the allocations. Entries must be unique. The port manager controller + will attempt to allocate port resources for each allocation specification in the + list. To remove an allocation and free up port resources, remove the allocation + from the list. + items: + description: NnfPortManagerAllocationSpec defines the desired state + for a single port allocation + properties: + count: + default: 1 + description: |- + Count is the number of desired ports the requester needs. The port manager + will attempt to allocate this many ports. + type: integer + requester: + description: Requester is an object reference to the requester + of a ports. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + required: + - count + - requester + type: object + type: array + systemConfiguration: + description: |- + SystemConfiguration is an object reference to the system configuration. The + Port Manager will use the available ports defined in the system configuration. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + required: + - allocations + - systemConfiguration + type: object + status: + description: NnfPortManagerStatus defines the observed state of NnfPortManager + properties: + allocations: + description: Allocations is a list of port allocation status'. + items: + description: NnfPortManagerAllocationStatus defines the allocation + status of a port for a given requester. + properties: + ports: + description: Ports is list of ports allocated to the owning + resource. + items: + type: integer + type: array + requester: + description: |- + Requester is an object reference to the requester of the port resource, if one exists, or + empty otherwise. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). 
This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + status: + description: Status is the ownership status of the port. + enum: + - InUse + - Free + - Cooldown + - InvalidConfiguration + - InsufficientResources + type: string + timeUnallocated: + description: |- + TimeUnallocated is when the port was unallocated. This is to ensure the proper cooldown + duration. + format: date-time + type: string + required: + - status + type: object + type: array + status: + description: Status is the current status of the port manager. 
+ enum: + - Ready + - SystemConfigurationNotFound + type: string + required: + - status + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfstorageprofiles.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfstorageprofiles.yaml index 231670d0..9ed1addf 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfstorageprofiles.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfstorageprofiles.yaml @@ -1848,5 +1848,679 @@ spec: type: object type: object served: true + storage: false + subresources: {} + - additionalPrinterColumns: + - description: True if this is the default instance + jsonPath: .data.default + name: DEFAULT + type: boolean + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha4 + schema: + openAPIV3Schema: + description: NnfStorageProfile is the Schema for the nnfstorageprofiles API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + data: + description: NnfStorageProfileData defines the desired state of NnfStorageProfile + properties: + default: + default: false + description: Default is true if this instance is the default resource + to use + type: boolean + gfs2Storage: + description: GFS2Storage defines the GFS2-specific configuration + properties: + capacityScalingFactor: + default: "1.0" + description: CapacityScalingFactor is a scaling factor for the + capacity requested in the DirectiveBreakdown + type: string + commandlines: + description: CmdLines contains commands to create volumes and + filesystems. 
+ properties: + lvChange: + description: LvChange specifies the various lvchange commandlines, + minus the "lvchange" + properties: + activate: + description: The lvchange commandline for activate, minus + the "lvchange" command + type: string + deactivate: + description: The lvchange commandline for deactivate, + minus the "lvchange" command + type: string + type: object + lvCreate: + description: LvCreate specifies the lvcreate commandline, + minus the "lvcreate". + type: string + lvRemove: + description: LvRemove specifies the lvcreate commandline, + minus the "lvremove". + type: string + mkfs: + description: Mkfs specifies the mkfs commandline, minus the + "mkfs". + type: string + mountCompute: + description: MountCompute specifies mount options for mounting + on the Compute. + type: string + mountRabbit: + description: MountRabbit specifies mount options for mounting + on the Rabbit. + type: string + postActivate: + description: |- + PreDeactivate specifies a list of commands to run on the Rabbit after the + file system has been activated + items: + type: string + type: array + preDeactivate: + description: |- + PreDeactivate specifies a list of commands to run on the Rabbit before the + file system is deactivated + items: + type: string + type: array + pvCreate: + description: PvCreate specifies the pvcreate commandline, + minus the "pvcreate". + type: string + pvRemove: + description: PvRemove specifies the pvremove commandline, + minus the "pvremove". 
+ type: string + sharedVg: + default: false + description: |- + SharedVg specifies that allocations from a workflow on the same Rabbit should share an + LVM VolumeGroup + type: boolean + vgChange: + description: VgChange specifies the various vgchange commandlines, + minus the "vgchange" + properties: + lockStart: + description: The vgchange commandline for lockStart, minus + the "vgchange" command + type: string + lockStop: + description: The vgchange commandline for lockStop, minus + the "vgchange" command + type: string + type: object + vgCreate: + description: VgCreate specifies the vgcreate commandline, + minus the "vgcreate". + type: string + vgRemove: + description: VgCreate specifies the vgcreate commandline, + minus the "vgremove". + type: string + type: object + storageLabels: + description: |- + Storagelabels defines a list of labels that are added to the DirectiveBreakdown + labels constraint. This restricts allocations to Storage resources with these labels + items: + type: string + type: array + type: object + lustreStorage: + description: LustreStorage defines the Lustre-specific configuration + properties: + capacityMdt: + default: 5GiB + description: |- + CapacityMDT specifies the size of the MDT device. This is also + used for a combined MGT+MDT device. + pattern: ^\d+(KiB|KB|MiB|MB|GiB|GB|TiB|TB)$ + type: string + capacityMgt: + default: 5GiB + description: CapacityMGT specifies the size of the MGT device. + pattern: ^\d+(KiB|KB|MiB|MB|GiB|GB|TiB|TB)$ + type: string + capacityScalingFactor: + default: "1.0" + description: CapacityScalingFactor is a scaling factor for the + OST capacity requested in the DirectiveBreakdown + type: string + combinedMgtMdt: + default: false + description: CombinedMGTMDT indicates whether the MGT and MDT + should be created on the same target device + type: boolean + exclusiveMdt: + default: false + description: ExclusiveMDT indicates that the MDT should not be + colocated with any other target on the chosen server. 
+ type: boolean + externalMgs: + description: |- + ExternalMGS specifies the use of an existing MGS rather than creating one. This can + be either the NID(s) of a pre-existing MGS that should be used, or it can be an NNF Persistent + Instance that was created with the "StandaloneMGTPoolName" option. In the latter case, the format + is "pool:poolName" where "poolName" is the argument from "StandaloneMGTPoolName". A single MGS will + be picked from the pool. + type: string + mdtCommandlines: + description: MdtCmdLines contains commands to create an MDT target. + properties: + mkfs: + description: |- + Mkfs specifies the mkfs.lustre commandline, minus the "mkfs.lustre". + Use the --mkfsoptions argument to specify the zfs create options. See zfsprops(7). + Use the --mountfsoptions argument to specify persistent mount options for the lustre targets. + type: string + mountTarget: + description: |- + MountTarget specifies the mount command line for the lustre target. + For persistent mount options for lustre targets, do not use this array; use the --mountfsoptions + argument to mkfs.lustre instead. + type: string + postActivate: + description: |- + PostActivate specifies a list of commands to run on the Rabbit after the + Lustre target has been activated + items: + type: string + type: array + preDeactivate: + description: |- + PreDeactivate specifies a list of commands to run on the Rabbit before the + Lustre target is deactivated + items: + type: string + type: array + zpoolCreate: + description: |- + ZpoolCreate specifies the zpool create commandline, minus the "zpool create". + This is where you may specify zpool create options, and the virtual device (vdev) such as + "mirror", or "draid". See zpoolconcepts(7). + type: string + type: object + mdtOptions: + description: MdtOptions contains options to use for libraries + used for an MDT target. 
+ properties: + colocateComputes: + default: false + description: |- + ColocateComputes indicates that the Lustre target should be placed on a Rabbit node that has a physical connection + to the compute nodes in a workflow + type: boolean + count: + description: Count specifies how many Lustre targets to create + minimum: 1 + type: integer + scale: + description: Scale provides a unitless value to determine + how many Lustre targets to create + maximum: 10 + minimum: 1 + type: integer + storageLabels: + description: |- + Storagelabels defines a list of labels that are added to the DirectiveBreakdown + labels constraint. This restricts allocations to Storage resources with these labels + items: + type: string + type: array + required: + - colocateComputes + type: object + mgtCommandlines: + description: MgtCmdLines contains commands to create an MGT target. + properties: + mkfs: + description: |- + Mkfs specifies the mkfs.lustre commandline, minus the "mkfs.lustre". + Use the --mkfsoptions argument to specify the zfs create options. See zfsprops(7). + Use the --mountfsoptions argument to specify persistent mount options for the lustre targets. + type: string + mountTarget: + description: |- + MountTarget specifies the mount command line for the lustre target. + For persistent mount options for lustre targets, do not use this array; use the --mountfsoptions + argument to mkfs.lustre instead. + type: string + postActivate: + description: |- + PostActivate specifies a list of commands to run on the Rabbit after the + Lustre target has been activated + items: + type: string + type: array + preDeactivate: + description: |- + PreDeactivate specifies a list of commands to run on the Rabbit before the + Lustre target is deactivated + items: + type: string + type: array + zpoolCreate: + description: |- + ZpoolCreate specifies the zpool create commandline, minus the "zpool create". 
+ This is where you may specify zpool create options, and the virtual device (vdev) such as + "mirror", or "draid". See zpoolconcepts(7). + type: string + type: object + mgtMdtCommandlines: + description: MgtMdtCmdLines contains commands to create a combined + MGT/MDT target. + properties: + mkfs: + description: |- + Mkfs specifies the mkfs.lustre commandline, minus the "mkfs.lustre". + Use the --mkfsoptions argument to specify the zfs create options. See zfsprops(7). + Use the --mountfsoptions argument to specify persistent mount options for the lustre targets. + type: string + mountTarget: + description: |- + MountTarget specifies the mount command line for the lustre target. + For persistent mount options for lustre targets, do not use this array; use the --mountfsoptions + argument to mkfs.lustre instead. + type: string + postActivate: + description: |- + PostActivate specifies a list of commands to run on the Rabbit after the + Lustre target has been activated + items: + type: string + type: array + preDeactivate: + description: |- + PreDeactivate specifies a list of commands to run on the Rabbit before the + Lustre target is deactivated + items: + type: string + type: array + zpoolCreate: + description: |- + ZpoolCreate specifies the zpool create commandline, minus the "zpool create". + This is where you may specify zpool create options, and the virtual device (vdev) such as + "mirror", or "draid". See zpoolconcepts(7). + type: string + type: object + mgtMdtOptions: + description: MgtMdtOptions contains options to use for libraries + used for a combined MGT/MDT target. 
+ properties: + colocateComputes: + default: false + description: |- + ColocateComputes indicates that the Lustre target should be placed on a Rabbit node that has a physical connection + to the compute nodes in a workflow + type: boolean + count: + description: Count specifies how many Lustre targets to create + minimum: 1 + type: integer + scale: + description: Scale provides a unitless value to determine + how many Lustre targets to create + maximum: 10 + minimum: 1 + type: integer + storageLabels: + description: |- + Storagelabels defines a list of labels that are added to the DirectiveBreakdown + labels constraint. This restricts allocations to Storage resources with these labels + items: + type: string + type: array + required: + - colocateComputes + type: object + mgtOptions: + description: MgtOptions contains options to use for libraries + used for an MGT target. + properties: + colocateComputes: + default: false + description: |- + ColocateComputes indicates that the Lustre target should be placed on a Rabbit node that has a physical connection + to the compute nodes in a workflow + type: boolean + count: + description: Count specifies how many Lustre targets to create + minimum: 1 + type: integer + scale: + description: Scale provides a unitless value to determine + how many Lustre targets to create + maximum: 10 + minimum: 1 + type: integer + storageLabels: + description: |- + Storagelabels defines a list of labels that are added to the DirectiveBreakdown + labels constraint. This restricts allocations to Storage resources with these labels + items: + type: string + type: array + required: + - colocateComputes + type: object + mountCompute: + description: MountCompute specifies mount options for making the + Lustre client mount on the Compute. + type: string + mountRabbit: + description: MountRabbit specifies mount options for making the + Lustre client mount on the Rabbit. 
+ type: string + ostCommandlines: + description: OstCmdLines contains commands to create an OST target. + properties: + mkfs: + description: |- + Mkfs specifies the mkfs.lustre commandline, minus the "mkfs.lustre". + Use the --mkfsoptions argument to specify the zfs create options. See zfsprops(7). + Use the --mountfsoptions argument to specify persistent mount options for the lustre targets. + type: string + mountTarget: + description: |- + MountTarget specifies the mount command line for the lustre target. + For persistent mount options for lustre targets, do not use this array; use the --mountfsoptions + argument to mkfs.lustre instead. + type: string + postActivate: + description: |- + PostActivate specifies a list of commands to run on the Rabbit after the + Lustre target has been activated + items: + type: string + type: array + preDeactivate: + description: |- + PreDeactivate specifies a list of commands to run on the Rabbit before the + Lustre target is deactivated + items: + type: string + type: array + zpoolCreate: + description: |- + ZpoolCreate specifies the zpool create commandline, minus the "zpool create". + This is where you may specify zpool create options, and the virtual device (vdev) such as + "mirror", or "draid". See zpoolconcepts(7). + type: string + type: object + ostOptions: + description: OstOptions contains options to use for libraries + used for an OST target. 
+ properties: + colocateComputes: + default: false + description: |- + ColocateComputes indicates that the Lustre target should be placed on a Rabbit node that has a physical connection + to the compute nodes in a workflow + type: boolean + count: + description: Count specifies how many Lustre targets to create + minimum: 1 + type: integer + scale: + description: Scale provides a unitless value to determine + how many Lustre targets to create + maximum: 10 + minimum: 1 + type: integer + storageLabels: + description: |- + Storagelabels defines a list of labels that are added to the DirectiveBreakdown + labels constraint. This restricts allocations to Storage resources with these labels + items: + type: string + type: array + required: + - colocateComputes + type: object + standaloneMgtPoolName: + description: |- + StandaloneMGTPoolName creates a Lustre MGT without a MDT or OST. This option can only be used when creating + a persistent Lustre instance. The MGS is placed into a named pool that can be used by the "ExternalMGS" option. + Multiple pools can be created. + type: string + type: object + pinned: + default: false + description: Pinned is true if this instance is an immutable copy + type: boolean + rawStorage: + description: RawStorage defines the Raw-specific configuration + properties: + capacityScalingFactor: + default: "1.0" + description: CapacityScalingFactor is a scaling factor for the + capacity requested in the DirectiveBreakdown + type: string + commandlines: + description: CmdLines contains commands to create volumes and + filesystems. 
+ properties: + lvChange: + description: LvChange specifies the various lvchange commandlines, + minus the "lvchange" + properties: + activate: + description: The lvchange commandline for activate, minus + the "lvchange" command + type: string + deactivate: + description: The lvchange commandline for deactivate, + minus the "lvchange" command + type: string + type: object + lvCreate: + description: LvCreate specifies the lvcreate commandline, + minus the "lvcreate". + type: string + lvRemove: + description: LvRemove specifies the lvcreate commandline, + minus the "lvremove". + type: string + mkfs: + description: Mkfs specifies the mkfs commandline, minus the + "mkfs". + type: string + mountCompute: + description: MountCompute specifies mount options for mounting + on the Compute. + type: string + mountRabbit: + description: MountRabbit specifies mount options for mounting + on the Rabbit. + type: string + postActivate: + description: |- + PreDeactivate specifies a list of commands to run on the Rabbit after the + file system has been activated + items: + type: string + type: array + preDeactivate: + description: |- + PreDeactivate specifies a list of commands to run on the Rabbit before the + file system is deactivated + items: + type: string + type: array + pvCreate: + description: PvCreate specifies the pvcreate commandline, + minus the "pvcreate". + type: string + pvRemove: + description: PvRemove specifies the pvremove commandline, + minus the "pvremove". 
+ type: string + sharedVg: + default: false + description: |- + SharedVg specifies that allocations from a workflow on the same Rabbit should share an + LVM VolumeGroup + type: boolean + vgChange: + description: VgChange specifies the various vgchange commandlines, + minus the "vgchange" + properties: + lockStart: + description: The vgchange commandline for lockStart, minus + the "vgchange" command + type: string + lockStop: + description: The vgchange commandline for lockStop, minus + the "vgchange" command + type: string + type: object + vgCreate: + description: VgCreate specifies the vgcreate commandline, + minus the "vgcreate". + type: string + vgRemove: + description: VgCreate specifies the vgcreate commandline, + minus the "vgremove". + type: string + type: object + storageLabels: + description: |- + Storagelabels defines a list of labels that are added to the DirectiveBreakdown + labels constraint. This restricts allocations to Storage resources with these labels + items: + type: string + type: array + type: object + xfsStorage: + description: XFSStorage defines the XFS-specific configuration + properties: + capacityScalingFactor: + default: "1.0" + description: CapacityScalingFactor is a scaling factor for the + capacity requested in the DirectiveBreakdown + type: string + commandlines: + description: CmdLines contains commands to create volumes and + filesystems. + properties: + lvChange: + description: LvChange specifies the various lvchange commandlines, + minus the "lvchange" + properties: + activate: + description: The lvchange commandline for activate, minus + the "lvchange" command + type: string + deactivate: + description: The lvchange commandline for deactivate, + minus the "lvchange" command + type: string + type: object + lvCreate: + description: LvCreate specifies the lvcreate commandline, + minus the "lvcreate". + type: string + lvRemove: + description: LvRemove specifies the lvcreate commandline, + minus the "lvremove". 
+ type: string + mkfs: + description: Mkfs specifies the mkfs commandline, minus the + "mkfs". + type: string + mountCompute: + description: MountCompute specifies mount options for mounting + on the Compute. + type: string + mountRabbit: + description: MountRabbit specifies mount options for mounting + on the Rabbit. + type: string + postActivate: + description: |- + PreDeactivate specifies a list of commands to run on the Rabbit after the + file system has been activated + items: + type: string + type: array + preDeactivate: + description: |- + PreDeactivate specifies a list of commands to run on the Rabbit before the + file system is deactivated + items: + type: string + type: array + pvCreate: + description: PvCreate specifies the pvcreate commandline, + minus the "pvcreate". + type: string + pvRemove: + description: PvRemove specifies the pvremove commandline, + minus the "pvremove". + type: string + sharedVg: + default: false + description: |- + SharedVg specifies that allocations from a workflow on the same Rabbit should share an + LVM VolumeGroup + type: boolean + vgChange: + description: VgChange specifies the various vgchange commandlines, + minus the "vgchange" + properties: + lockStart: + description: The vgchange commandline for lockStart, minus + the "vgchange" command + type: string + lockStop: + description: The vgchange commandline for lockStop, minus + the "vgchange" command + type: string + type: object + vgCreate: + description: VgCreate specifies the vgcreate commandline, + minus the "vgcreate". + type: string + vgRemove: + description: VgCreate specifies the vgcreate commandline, + minus the "vgremove". + type: string + type: object + storageLabels: + description: |- + Storagelabels defines a list of labels that are added to the DirectiveBreakdown + labels constraint. 
This restricts allocations to Storage resources with these labels + items: + type: string + type: array + type: object + type: object + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + type: object + served: true storage: true subresources: {} diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfstorages.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfstorages.yaml index 875ed404..767a9f4c 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfstorages.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfstorages.yaml @@ -896,6 +896,301 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.ready + name: READY + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + - jsonPath: .status.error.severity + name: ERROR + type: string + name: v1alpha4 + schema: + openAPIV3Schema: + description: NnfStorage is the Schema for the storages API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + NnfStorageSpec defines the specification for requesting generic storage on a set + of available NNF Nodes. This object is related to a #DW for NNF Storage, with the WLM + making the determination for which NNF Nodes it wants to utilize. + properties: + allocationSets: + description: |- + AllocationSets is a list of different types of storage allocations to make. Each + AllocationSet describes an entire allocation spanning multiple Rabbits. For example, + an AllocationSet could be all of the OSTs in a Lustre filesystem, or all of the raw + block devices in a raw block configuration. + items: + description: NnfStorageAllocationSetSpec defines the details for + an allocation set + properties: + backFs: + description: BackFs is the type of backing filesystem to use. + enum: + - ldiskfs + - zfs + type: string + capacity: + description: |- + Capacity defines the capacity, in bytes, of this storage specification. The NNF Node itself + may split the storage among the available drives operating in the NNF Node. 
+ format: int64 + type: integer + mgsAddress: + description: |- + MgsAddress is the NID of the MGS when a pre-existing MGS is + provided in the NnfStorageProfile + type: string + name: + description: Name is a human readable label for this set of + allocations (e.g., xfs) + type: string + nodes: + description: Nodes is the list of Rabbit nodes to make allocations + on + items: + description: NnfStorageAllocationNodes identifies the node + and properties of the allocation to make on that node + properties: + count: + description: Number of allocations to make on this node + type: integer + name: + description: Name of the node to make the allocation on + type: string + required: + - count + - name + type: object + type: array + persistentMgsReference: + description: |- + PersistentMgsReference is a reference to a persistent storage that is providing + the external MGS. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + sharedAllocation: + description: |- + SharedAllocation shares a single block storage allocation between multiple file system allocations + (within the same workflow) on a Rabbit + type: boolean + targetType: + description: TargetType is the type of Lustre target to be created. + enum: + - mgt + - mdt + - mgtmdt + - ost + type: string + required: + - capacity + - name + - nodes + - sharedAllocation + type: object + type: array + fileSystemType: + default: raw + description: |- + FileSystemType defines the type of the desired filesystem, or raw + block device. + enum: + - raw + - lvm + - zfs + - xfs + - gfs2 + - lustre + type: string + groupID: + description: Group ID for file system + format: int32 + type: integer + userID: + description: User ID for file system + format: int32 + type: integer + required: + - allocationSets + - groupID + - userID + type: object + status: + description: NnfStorageStatus defines the observed status of NNF Storage. + properties: + allocationSets: + description: |- + AllocationsSets holds the status information for each of the AllocationSets + from the spec. 
+ items:
+ description: NnfStorageAllocationSetStatus contains the status information
+ for an allocation set
+ properties:
+ allocationCount:
+ description: |-
+ AllocationCount is the total number of allocations that currently
+ exist
+ type: integer
+ ready:
+ type: boolean
+ required:
+ - allocationCount
+ type: object
+ type: array
+ error:
+ description: Error information
+ properties:
+ debugMessage:
+ description: Internal debug message for the error
+ type: string
+ severity:
+ description: |-
+ Indication of how severe the error is. Minor will likely succeed, Major may
+ succeed, and Fatal will never succeed.
+ enum:
+ - Minor
+ - Major
+ - Fatal
+ type: string
+ type:
+ description: Internal or user error
+ enum:
+ - Internal
+ - User
+ - WLM
+ type: string
+ userMessage:
+ description: Optional user facing message if the error is relevant
+ to an end user
+ type: string
+ required:
+ - debugMessage
+ - severity
+ - type
+ type: object
+ fileSystemName:
+ description: FileSystemName is the fsname parameter for the Lustre
+ filesystem.
+ maxLength: 8
+ type: string
+ lustreMgtReference:
+ description: |-
+ LustreMgtReference is an object reference to the NnfLustreMGT resource used
+ by the NnfStorage
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object. 
+ TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + mgsAddress: + description: MgsAddress is the NID of the MGS. + type: string + ready: + description: Ready reflects the status of this NNF Storage + type: boolean + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfsystemstorages.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfsystemstorages.yaml index 7e038bd1..093f1dc5 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfsystemstorages.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfsystemstorages.yaml @@ -725,6 +725,246 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - name: v1alpha4 + schema: + openAPIV3Schema: + description: NnfSystemStorage is the Schema for the nnfsystemstorages API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NnfSystemStorageSpec defines the desired state of NnfSystemStorage + properties: + capacity: + default: 1073741824 + description: Capacity is the allocation size on each Rabbit + format: int64 + type: integer + clientMountPath: + description: ClientMountPath is an optional path for where to mount + the file system on the computes + type: string + computesPattern: + description: |- + ComputesPattern is a list of compute node indexes (0-15) to make the storage accessible to. 
This
+ is only used if ComputesTarget is "pattern"
+ items:
+ type: integer
+ maxItems: 16
+ type: array
+ computesTarget:
+ default: all
+ description: ComputesTarget specifies which computes to make the storage
+ accessible to
+ enum:
+ - all
+ - even
+ - odd
+ - pattern
+ type: string
+ excludeComputes:
+ description: |-
+ ExcludeComputes is a list of compute nodes to exclude from the compute nodes listed in the
+ SystemConfiguration
+ items:
+ type: string
+ type: array
+ excludeDisabledRabbits:
+ default: false
+ description: |-
+ ExcludeDisabledRabbits looks at the Storage resource for a Rabbit and does not use it if it's
+ marked as "disabled"
+ type: boolean
+ excludeRabbits:
+ description: ExcludeRabbits is a list of Rabbits to exclude from the
+ Rabbits in the SystemConfiguration
+ items:
+ type: string
+ type: array
+ includeComputes:
+ description: |-
+ IncludeComputes is a list of compute nodes to use rather than getting the list of compute nodes
+ from the SystemConfiguration
+ items:
+ type: string
+ type: array
+ includeRabbits:
+ description: |-
+ IncludeRabbits is a list of Rabbits to use rather than getting the list of Rabbits from the
+ SystemConfiguration
+ items:
+ type: string
+ type: array
+ makeClientMounts:
+ default: false
+ description: |-
+ MakeClientMounts specifies whether to make ClientMount resources or just
+ make the devices available to the client
+ type: boolean
+ storageProfile:
+ description: StorageProfile is an object reference to the storage
+ profile to use
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
+ For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + systemConfiguration: + description: |- + SystemConfiguration is an object reference to the SystemConfiguration resource to use. If this + field is empty, name: default namespace: default is used. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
+ For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + type: + default: raw + description: Type is the file system type to use for the storage allocation + enum: + - raw + - xfs + - gfs2 + type: string + required: + - capacity + - makeClientMounts + - storageProfile + type: object + status: + description: NnfSystemStorageStatus defines the observed state of NnfSystemStorage + properties: + error: + description: Error information + properties: + debugMessage: + description: Internal debug message for the error + type: string + severity: + description: |- + Indication of how severe the error is. 
Minor will likely succeed, Major may + succeed, and Fatal will never succeed. + enum: + - Minor + - Major + - Fatal + type: string + type: + description: Internal or user error + enum: + - Internal + - User + - WLM + type: string + userMessage: + description: Optional user facing message if the error is relevant + to an end user + type: string + required: + - debugMessage + - severity + - type + type: object + ready: + description: Ready signifies whether all work has been completed + type: boolean + required: + - ready + type: object + type: object + served: true storage: true subresources: status: {} From 341179124288baa669cf729890430ecc272c625d Mon Sep 17 00:00:00 2001 From: Dean Roehrich Date: Mon, 18 Nov 2024 13:00:30 -0600 Subject: [PATCH 09/23] Remove old kube-rbac-proxy from kind-push target (#415) Signed-off-by: Dean Roehrich --- Makefile | 2 -- 1 file changed, 2 deletions(-) diff --git a/Makefile b/Makefile index bf3e06df..c404412f 100644 --- a/Makefile +++ b/Makefile @@ -303,8 +303,6 @@ docker-buildx: ## Build and push docker image for the manager for cross-platform kind-push: VERSION ?= $(shell cat .version) kind-push: .version ## Push docker image to kind kind load docker-image $(IMAGE_TAG_BASE):$(VERSION) - ${CONTAINER_TOOL} pull gcr.io/kubebuilder/kube-rbac-proxy:v0.13.0 - kind load docker-image gcr.io/kubebuilder/kube-rbac-proxy:v0.13.0 ##@ Deployment From 17ac743d3535b743f7bfb7b996cdbf0490bd1acc Mon Sep 17 00:00:00 2001 From: Blake Devcich Date: Fri, 15 Nov 2024 16:26:04 -0600 Subject: [PATCH 10/23] Add PostMount/PreUnmount Support to lustre; Rename PostActivate/PreDeactivate for non-lustre We currently do not have a way to perform actions (e.g. `lfs setstripe`) on lustre filesystems from the client side. For situations like `DataIn`, there is no way to prepare the lustre filesystem prior to data movement. This change adds PostMount and PreUnmount command lines to the NnfStorageProfile to allow commands to be run in those situations. 
- For lustre filesystems, PostMount and PreUnmount have been added in addition to the existing PostActivate/PreDeactivate commands - PostActivate/PreDeactivate are performed server-side - PostMount/PreUnmount are performed client-side - For XFS/GFS2 filesystems, PostActivate/PreDeactivate have been renamed to PostMount/PreUnmount For lustre, multiple NnfNodeStorages are created for each OST, MDT, and MGT. PostMount should only happen once, so OST0 is what is used. Before the filesystem can be mounted to run the commands, we need to ensure that all other NnfNodeStorages are ready. Once that happens, OST0 can then be created and then the NnfNodeStorage controller can run the PostMount commands. The opposite logic applies in the PreUnmount case where OST0 is now deleted first and performs the PreUnmount commands. For XFS/GFS2, there is no issue of ordering. Signed-off-by: Blake Devcich --- api/v1alpha1/conversion.go | 20 ++- api/v1alpha1/zz_generated.conversion.go | 6 +- api/v1alpha2/conversion.go | 20 ++- api/v1alpha2/zz_generated.conversion.go | 6 +- api/v1alpha3/conversion.go | 54 ++++++- api/v1alpha3/zz_generated.conversion.go | 79 ++++----- api/v1alpha4/nnfstorage_types.go | 3 +- api/v1alpha4/nnfstorageprofile_types.go | 22 ++- api/v1alpha4/zz_generated.deepcopy.go | 18 ++- .../nnf.cray.hpe.com_nnfstorageprofiles.yaml | 100 +++++++++--- config/examples/nnf_nnfstorageprofile.yaml | 12 +- internal/controller/filesystem_helpers.go | 17 +- internal/controller/integration_test.go | 5 +- internal/controller/nnf_access_controller.go | 8 +- .../controller/nnf_node_storage_controller.go | 28 +++- internal/controller/nnf_storage_controller.go | 150 ++++++++++++++---- pkg/filesystem/filesystem.go | 6 + pkg/filesystem/kind.go | 26 ++- pkg/filesystem/lustre.go | 98 +++++++++++- pkg/filesystem/mock.go | 26 ++- pkg/filesystem/simple.go | 48 +++--- 21 files changed, 587 insertions(+), 165 deletions(-) diff --git a/api/v1alpha1/conversion.go b/api/v1alpha1/conversion.go index 
fd10b5f4..72cbf1c1 100644 --- a/api/v1alpha1/conversion.go +++ b/api/v1alpha1/conversion.go @@ -437,18 +437,26 @@ func (src *NnfStorageProfile) ConvertTo(dstRaw conversion.Hub) error { if hasAnno { dst.Data.LustreStorage.MgtCmdLines.PostActivate = append([]string(nil), restored.Data.LustreStorage.MgtCmdLines.PostActivate...) dst.Data.LustreStorage.MgtCmdLines.PreDeactivate = append([]string(nil), restored.Data.LustreStorage.MgtCmdLines.PreDeactivate...) + dst.Data.LustreStorage.MgtCmdLines.PostMount = append([]string(nil), restored.Data.LustreStorage.MgtCmdLines.PostMount...) + dst.Data.LustreStorage.MgtCmdLines.PreUnmount = append([]string(nil), restored.Data.LustreStorage.MgtCmdLines.PreUnmount...) dst.Data.LustreStorage.MgtMdtCmdLines.PostActivate = append([]string(nil), restored.Data.LustreStorage.MgtMdtCmdLines.PostActivate...) dst.Data.LustreStorage.MgtMdtCmdLines.PreDeactivate = append([]string(nil), restored.Data.LustreStorage.MgtMdtCmdLines.PreDeactivate...) + dst.Data.LustreStorage.MgtMdtCmdLines.PostMount = append([]string(nil), restored.Data.LustreStorage.MgtMdtCmdLines.PostMount...) + dst.Data.LustreStorage.MgtMdtCmdLines.PreUnmount = append([]string(nil), restored.Data.LustreStorage.MgtMdtCmdLines.PreUnmount...) dst.Data.LustreStorage.MdtCmdLines.PostActivate = append([]string(nil), restored.Data.LustreStorage.MdtCmdLines.PostActivate...) dst.Data.LustreStorage.MdtCmdLines.PreDeactivate = append([]string(nil), restored.Data.LustreStorage.MdtCmdLines.PreDeactivate...) + dst.Data.LustreStorage.MdtCmdLines.PostMount = append([]string(nil), restored.Data.LustreStorage.MdtCmdLines.PostMount...) + dst.Data.LustreStorage.MdtCmdLines.PreUnmount = append([]string(nil), restored.Data.LustreStorage.MdtCmdLines.PreUnmount...) dst.Data.LustreStorage.OstCmdLines.PostActivate = append([]string(nil), restored.Data.LustreStorage.OstCmdLines.PostActivate...) 
dst.Data.LustreStorage.OstCmdLines.PreDeactivate = append([]string(nil), restored.Data.LustreStorage.OstCmdLines.PreDeactivate...) - dst.Data.RawStorage.CmdLines.PostActivate = append([]string(nil), restored.Data.RawStorage.CmdLines.PostActivate...) - dst.Data.RawStorage.CmdLines.PreDeactivate = append([]string(nil), restored.Data.RawStorage.CmdLines.PreDeactivate...) - dst.Data.XFSStorage.CmdLines.PostActivate = append([]string(nil), restored.Data.XFSStorage.CmdLines.PostActivate...) - dst.Data.XFSStorage.CmdLines.PreDeactivate = append([]string(nil), restored.Data.XFSStorage.CmdLines.PreDeactivate...) - dst.Data.GFS2Storage.CmdLines.PostActivate = append([]string(nil), restored.Data.GFS2Storage.CmdLines.PostActivate...) - dst.Data.GFS2Storage.CmdLines.PreDeactivate = append([]string(nil), restored.Data.GFS2Storage.CmdLines.PreDeactivate...) + dst.Data.LustreStorage.OstCmdLines.PostMount = append([]string(nil), restored.Data.LustreStorage.OstCmdLines.PostMount...) + dst.Data.LustreStorage.OstCmdLines.PreUnmount = append([]string(nil), restored.Data.LustreStorage.OstCmdLines.PreUnmount...) + dst.Data.RawStorage.CmdLines.PostMount = append([]string(nil), restored.Data.RawStorage.CmdLines.PostMount...) + dst.Data.RawStorage.CmdLines.PreUnmount = append([]string(nil), restored.Data.RawStorage.CmdLines.PreUnmount...) + dst.Data.XFSStorage.CmdLines.PostMount = append([]string(nil), restored.Data.XFSStorage.CmdLines.PostMount...) + dst.Data.XFSStorage.CmdLines.PreUnmount = append([]string(nil), restored.Data.XFSStorage.CmdLines.PreUnmount...) + dst.Data.GFS2Storage.CmdLines.PostMount = append([]string(nil), restored.Data.GFS2Storage.CmdLines.PostMount...) + dst.Data.GFS2Storage.CmdLines.PreUnmount = append([]string(nil), restored.Data.GFS2Storage.CmdLines.PreUnmount...) 
} return nil diff --git a/api/v1alpha1/zz_generated.conversion.go b/api/v1alpha1/zz_generated.conversion.go index ea29aa94..015dc2a5 100644 --- a/api/v1alpha1/zz_generated.conversion.go +++ b/api/v1alpha1/zz_generated.conversion.go @@ -2683,9 +2683,9 @@ func autoConvert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfil } out.LvRemove = in.LvRemove out.MountRabbit = in.MountRabbit - // WARNING: in.PostActivate requires manual conversion: does not exist in peer-type + // WARNING: in.PostMount requires manual conversion: does not exist in peer-type out.MountCompute = in.MountCompute - // WARNING: in.PreDeactivate requires manual conversion: does not exist in peer-type + // WARNING: in.PreUnmount requires manual conversion: does not exist in peer-type return nil } @@ -2866,6 +2866,8 @@ func autoConvert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorage out.Mkfs = in.Mkfs out.MountTarget = in.MountTarget // WARNING: in.PostActivate requires manual conversion: does not exist in peer-type + // WARNING: in.PostMount requires manual conversion: does not exist in peer-type + // WARNING: in.PreUnmount requires manual conversion: does not exist in peer-type // WARNING: in.PreDeactivate requires manual conversion: does not exist in peer-type return nil } diff --git a/api/v1alpha2/conversion.go b/api/v1alpha2/conversion.go index c564d223..4fb0531b 100644 --- a/api/v1alpha2/conversion.go +++ b/api/v1alpha2/conversion.go @@ -437,18 +437,26 @@ func (src *NnfStorageProfile) ConvertTo(dstRaw conversion.Hub) error { if hasAnno { dst.Data.LustreStorage.MgtCmdLines.PostActivate = append([]string(nil), restored.Data.LustreStorage.MgtCmdLines.PostActivate...) dst.Data.LustreStorage.MgtCmdLines.PreDeactivate = append([]string(nil), restored.Data.LustreStorage.MgtCmdLines.PreDeactivate...) + dst.Data.LustreStorage.MgtCmdLines.PostMount = append([]string(nil), restored.Data.LustreStorage.MgtCmdLines.PostMount...) 
+ dst.Data.LustreStorage.MgtCmdLines.PreUnmount = append([]string(nil), restored.Data.LustreStorage.MgtCmdLines.PreUnmount...) dst.Data.LustreStorage.MgtMdtCmdLines.PostActivate = append([]string(nil), restored.Data.LustreStorage.MgtMdtCmdLines.PostActivate...) dst.Data.LustreStorage.MgtMdtCmdLines.PreDeactivate = append([]string(nil), restored.Data.LustreStorage.MgtMdtCmdLines.PreDeactivate...) + dst.Data.LustreStorage.MgtMdtCmdLines.PostMount = append([]string(nil), restored.Data.LustreStorage.MgtMdtCmdLines.PostMount...) + dst.Data.LustreStorage.MgtMdtCmdLines.PreUnmount = append([]string(nil), restored.Data.LustreStorage.MgtMdtCmdLines.PreUnmount...) dst.Data.LustreStorage.MdtCmdLines.PostActivate = append([]string(nil), restored.Data.LustreStorage.MdtCmdLines.PostActivate...) dst.Data.LustreStorage.MdtCmdLines.PreDeactivate = append([]string(nil), restored.Data.LustreStorage.MdtCmdLines.PreDeactivate...) + dst.Data.LustreStorage.MdtCmdLines.PostMount = append([]string(nil), restored.Data.LustreStorage.MdtCmdLines.PostMount...) + dst.Data.LustreStorage.MdtCmdLines.PreUnmount = append([]string(nil), restored.Data.LustreStorage.MdtCmdLines.PreUnmount...) dst.Data.LustreStorage.OstCmdLines.PostActivate = append([]string(nil), restored.Data.LustreStorage.OstCmdLines.PostActivate...) dst.Data.LustreStorage.OstCmdLines.PreDeactivate = append([]string(nil), restored.Data.LustreStorage.OstCmdLines.PreDeactivate...) - dst.Data.RawStorage.CmdLines.PostActivate = append([]string(nil), restored.Data.RawStorage.CmdLines.PostActivate...) - dst.Data.RawStorage.CmdLines.PreDeactivate = append([]string(nil), restored.Data.RawStorage.CmdLines.PreDeactivate...) - dst.Data.XFSStorage.CmdLines.PostActivate = append([]string(nil), restored.Data.XFSStorage.CmdLines.PostActivate...) - dst.Data.XFSStorage.CmdLines.PreDeactivate = append([]string(nil), restored.Data.XFSStorage.CmdLines.PreDeactivate...) 
- dst.Data.GFS2Storage.CmdLines.PostActivate = append([]string(nil), restored.Data.GFS2Storage.CmdLines.PostActivate...) - dst.Data.GFS2Storage.CmdLines.PreDeactivate = append([]string(nil), restored.Data.GFS2Storage.CmdLines.PreDeactivate...) + dst.Data.LustreStorage.OstCmdLines.PostMount = append([]string(nil), restored.Data.LustreStorage.OstCmdLines.PostMount...) + dst.Data.LustreStorage.OstCmdLines.PreUnmount = append([]string(nil), restored.Data.LustreStorage.OstCmdLines.PreUnmount...) + dst.Data.RawStorage.CmdLines.PostMount = append([]string(nil), restored.Data.RawStorage.CmdLines.PostMount...) + dst.Data.RawStorage.CmdLines.PreUnmount = append([]string(nil), restored.Data.RawStorage.CmdLines.PreUnmount...) + dst.Data.XFSStorage.CmdLines.PostMount = append([]string(nil), restored.Data.XFSStorage.CmdLines.PostMount...) + dst.Data.XFSStorage.CmdLines.PreUnmount = append([]string(nil), restored.Data.XFSStorage.CmdLines.PreUnmount...) + dst.Data.GFS2Storage.CmdLines.PostMount = append([]string(nil), restored.Data.GFS2Storage.CmdLines.PostMount...) + dst.Data.GFS2Storage.CmdLines.PreUnmount = append([]string(nil), restored.Data.GFS2Storage.CmdLines.PreUnmount...) 
} return nil diff --git a/api/v1alpha2/zz_generated.conversion.go b/api/v1alpha2/zz_generated.conversion.go index 214f3c85..73545eb2 100644 --- a/api/v1alpha2/zz_generated.conversion.go +++ b/api/v1alpha2/zz_generated.conversion.go @@ -2683,9 +2683,9 @@ func autoConvert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfil } out.LvRemove = in.LvRemove out.MountRabbit = in.MountRabbit - // WARNING: in.PostActivate requires manual conversion: does not exist in peer-type + // WARNING: in.PostMount requires manual conversion: does not exist in peer-type out.MountCompute = in.MountCompute - // WARNING: in.PreDeactivate requires manual conversion: does not exist in peer-type + // WARNING: in.PreUnmount requires manual conversion: does not exist in peer-type return nil } @@ -2866,6 +2866,8 @@ func autoConvert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorage out.Mkfs = in.Mkfs out.MountTarget = in.MountTarget // WARNING: in.PostActivate requires manual conversion: does not exist in peer-type + // WARNING: in.PostMount requires manual conversion: does not exist in peer-type + // WARNING: in.PreUnmount requires manual conversion: does not exist in peer-type // WARNING: in.PreDeactivate requires manual conversion: does not exist in peer-type return nil } diff --git a/api/v1alpha3/conversion.go b/api/v1alpha3/conversion.go index db8e7da2..504ad723 100644 --- a/api/v1alpha3/conversion.go +++ b/api/v1alpha3/conversion.go @@ -21,6 +21,7 @@ package v1alpha3 import ( apierrors "k8s.io/apimachinery/pkg/api/errors" + apiconversion "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/conversion" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -425,12 +426,37 @@ func (src *NnfStorageProfile) ConvertTo(dstRaw conversion.Hub) error { // Manually restore data. 
restored := &nnfv1alpha4.NnfStorageProfile{} - if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + hasAnno, err := utilconversion.UnmarshalData(src, restored) + if err != nil { return err } // EDIT THIS FUNCTION! If the annotation is holding anything that is // hub-specific then copy it into 'dst' from 'restored'. // Otherwise, you may comment out UnmarshalData() until it's needed. + if hasAnno { + dst.Data.LustreStorage.MgtCmdLines.PostMount = append([]string(nil), restored.Data.LustreStorage.MgtCmdLines.PostMount...) + dst.Data.LustreStorage.MgtCmdLines.PreUnmount = append([]string(nil), restored.Data.LustreStorage.MgtCmdLines.PreUnmount...) + dst.Data.LustreStorage.MgtMdtCmdLines.PostMount = append([]string(nil), restored.Data.LustreStorage.MgtMdtCmdLines.PostMount...) + dst.Data.LustreStorage.MgtMdtCmdLines.PreUnmount = append([]string(nil), restored.Data.LustreStorage.MgtMdtCmdLines.PreUnmount...) + dst.Data.LustreStorage.MdtCmdLines.PostMount = append([]string(nil), restored.Data.LustreStorage.MdtCmdLines.PostMount...) + dst.Data.LustreStorage.MdtCmdLines.PreUnmount = append([]string(nil), restored.Data.LustreStorage.MdtCmdLines.PreUnmount...) + dst.Data.LustreStorage.OstCmdLines.PostMount = append([]string(nil), restored.Data.LustreStorage.OstCmdLines.PostMount...) + dst.Data.LustreStorage.OstCmdLines.PreUnmount = append([]string(nil), restored.Data.LustreStorage.OstCmdLines.PreUnmount...) + dst.Data.RawStorage.CmdLines.PostMount = append([]string(nil), restored.Data.RawStorage.CmdLines.PostMount...) + dst.Data.RawStorage.CmdLines.PreUnmount = append([]string(nil), restored.Data.RawStorage.CmdLines.PreUnmount...) + dst.Data.XFSStorage.CmdLines.PostMount = append([]string(nil), restored.Data.XFSStorage.CmdLines.PostMount...) + dst.Data.XFSStorage.CmdLines.PreUnmount = append([]string(nil), restored.Data.XFSStorage.CmdLines.PreUnmount...) 
+ dst.Data.GFS2Storage.CmdLines.PostMount = append([]string(nil), restored.Data.GFS2Storage.CmdLines.PostMount...) + dst.Data.GFS2Storage.CmdLines.PreUnmount = append([]string(nil), restored.Data.GFS2Storage.CmdLines.PreUnmount...) + } else { + // For non-lustre, PostActivate is now PostMount and PreDeactivate is now PreUnmount + dst.Data.RawStorage.CmdLines.PostMount = src.Data.RawStorage.CmdLines.PostActivate + dst.Data.XFSStorage.CmdLines.PostMount = src.Data.XFSStorage.CmdLines.PostActivate + dst.Data.GFS2Storage.CmdLines.PostMount = src.Data.GFS2Storage.CmdLines.PostActivate + dst.Data.RawStorage.CmdLines.PreUnmount = src.Data.RawStorage.CmdLines.PreDeactivate + dst.Data.XFSStorage.CmdLines.PreUnmount = src.Data.XFSStorage.CmdLines.PreDeactivate + dst.Data.GFS2Storage.CmdLines.PreUnmount = src.Data.GFS2Storage.CmdLines.PreDeactivate + } return nil } @@ -443,6 +469,14 @@ func (dst *NnfStorageProfile) ConvertFrom(srcRaw conversion.Hub) error { return err } + // For non-lustre, PostActivate is now PostMount and PreDeactivate is now PreUnmount + dst.Data.RawStorage.CmdLines.PostActivate = src.Data.RawStorage.CmdLines.PostMount + dst.Data.XFSStorage.CmdLines.PostActivate = src.Data.XFSStorage.CmdLines.PostMount + dst.Data.GFS2Storage.CmdLines.PostActivate = src.Data.GFS2Storage.CmdLines.PostMount + dst.Data.RawStorage.CmdLines.PreDeactivate = src.Data.RawStorage.CmdLines.PreUnmount + dst.Data.XFSStorage.CmdLines.PreDeactivate = src.Data.XFSStorage.CmdLines.PreUnmount + dst.Data.GFS2Storage.CmdLines.PreDeactivate = src.Data.GFS2Storage.CmdLines.PreUnmount + // Preserve Hub data on down-conversion except for metadata. 
return utilconversion.MarshalData(src, dst) } @@ -599,3 +633,21 @@ func (src *NnfSystemStorageList) ConvertTo(dstRaw conversion.Hub) error { func (dst *NnfSystemStorageList) ConvertFrom(srcRaw conversion.Hub) error { return apierrors.NewMethodNotSupported(resource("NnfSystemStorageList"), "ConvertFrom") } + +// The conversion-gen tool dropped these from zz_generated.conversion.go to +// force us to acknowledge that we are addressing the conversion requirements. + +// Convert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines is an autogenerated conversion function. +func Convert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(in *NnfStorageProfileCmdLines, out *nnfv1alpha4.NnfStorageProfileCmdLines, s apiconversion.Scope) error { + return autoConvert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(in, out, s) +} + +// Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines is an autogenerated conversion function. +func Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(in *nnfv1alpha4.NnfStorageProfileCmdLines, out *NnfStorageProfileCmdLines, s apiconversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(in, out, s) +} + +// Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines is an autogenerated conversion function. 
+func Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(in *nnfv1alpha4.NnfStorageProfileLustreCmdLines, out *NnfStorageProfileLustreCmdLines, s apiconversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(in, out, s) +} diff --git a/api/v1alpha3/zz_generated.conversion.go b/api/v1alpha3/zz_generated.conversion.go index 1f6d905f..dd057153 100644 --- a/api/v1alpha3/zz_generated.conversion.go +++ b/api/v1alpha3/zz_generated.conversion.go @@ -703,16 +703,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileCmdLines)(nil), (*v1alpha4.NnfStorageProfileCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(a.(*NnfStorageProfileCmdLines), b.(*v1alpha4.NnfStorageProfileCmdLines), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileCmdLines)(nil), (*NnfStorageProfileCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(a.(*v1alpha4.NnfStorageProfileCmdLines), b.(*NnfStorageProfileCmdLines), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*NnfStorageProfileData)(nil), (*v1alpha4.NnfStorageProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData(a.(*NnfStorageProfileData), b.(*v1alpha4.NnfStorageProfileData), scope) }); err != nil { @@ -768,11 +758,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileLustreCmdLines)(nil), (*NnfStorageProfileLustreCmdLines)(nil), func(a, 
b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(a.(*v1alpha4.NnfStorageProfileLustreCmdLines), b.(*NnfStorageProfileLustreCmdLines), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLustreData)(nil), (*v1alpha4.NnfStorageProfileLustreData)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_NnfStorageProfileLustreData_To_v1alpha4_NnfStorageProfileLustreData(a.(*NnfStorageProfileLustreData), b.(*v1alpha4.NnfStorageProfileLustreData), scope) }); err != nil { @@ -873,6 +858,21 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*NnfStorageProfileCmdLines)(nil), (*v1alpha4.NnfStorageProfileCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(a.(*NnfStorageProfileCmdLines), b.(*v1alpha4.NnfStorageProfileCmdLines), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1alpha4.NnfStorageProfileCmdLines)(nil), (*NnfStorageProfileCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(a.(*v1alpha4.NnfStorageProfileCmdLines), b.(*NnfStorageProfileCmdLines), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1alpha4.NnfStorageProfileLustreCmdLines)(nil), (*NnfStorageProfileLustreCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(a.(*v1alpha4.NnfStorageProfileLustreCmdLines), b.(*NnfStorageProfileLustreCmdLines), scope) + }); err != nil { + return err + } return nil } @@ -2658,17 +2658,12 @@ func 
autoConvert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfil } out.LvRemove = in.LvRemove out.MountRabbit = in.MountRabbit - out.PostActivate = *(*[]string)(unsafe.Pointer(&in.PostActivate)) + // WARNING: in.PostActivate requires manual conversion: does not exist in peer-type out.MountCompute = in.MountCompute - out.PreDeactivate = *(*[]string)(unsafe.Pointer(&in.PreDeactivate)) + // WARNING: in.PreDeactivate requires manual conversion: does not exist in peer-type return nil } -// Convert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines is an autogenerated conversion function. -func Convert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(in *NnfStorageProfileCmdLines, out *v1alpha4.NnfStorageProfileCmdLines, s conversion.Scope) error { - return autoConvert_v1alpha3_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(in, out, s) -} - func autoConvert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(in *v1alpha4.NnfStorageProfileCmdLines, out *NnfStorageProfileCmdLines, s conversion.Scope) error { out.Mkfs = in.Mkfs out.SharedVg = in.SharedVg @@ -2685,17 +2680,12 @@ func autoConvert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfil } out.LvRemove = in.LvRemove out.MountRabbit = in.MountRabbit - out.PostActivate = *(*[]string)(unsafe.Pointer(&in.PostActivate)) + // WARNING: in.PostMount requires manual conversion: does not exist in peer-type out.MountCompute = in.MountCompute - out.PreDeactivate = *(*[]string)(unsafe.Pointer(&in.PreDeactivate)) + // WARNING: in.PreUnmount requires manual conversion: does not exist in peer-type return nil } -// Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(in *v1alpha4.NnfStorageProfileCmdLines, out *NnfStorageProfileCmdLines, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(in, out, s) -} - func autoConvert_v1alpha3_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData(in *NnfStorageProfileData, out *v1alpha4.NnfStorageProfileData, s conversion.Scope) error { out.Default = in.Default out.Pinned = in.Pinned @@ -2816,7 +2806,17 @@ func Convert_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha3_NnfStorag func autoConvert_v1alpha3_NnfStorageProfileList_To_v1alpha4_NnfStorageProfileList(in *NnfStorageProfileList, out *v1alpha4.NnfStorageProfileList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha4.NnfStorageProfile)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1alpha4.NnfStorageProfile, len(*in)) + for i := range *in { + if err := Convert_v1alpha3_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -2827,7 +2827,17 @@ func Convert_v1alpha3_NnfStorageProfileList_To_v1alpha4_NnfStorageProfileList(in func autoConvert_v1alpha4_NnfStorageProfileList_To_v1alpha3_NnfStorageProfileList(in *v1alpha4.NnfStorageProfileList, out *NnfStorageProfileList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]NnfStorageProfile)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfStorageProfile, len(*in)) + for i := range *in { + if err := Convert_v1alpha4_NnfStorageProfile_To_v1alpha3_NnfStorageProfile(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -2855,15 +2865,12 @@ func 
autoConvert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorage out.Mkfs = in.Mkfs out.MountTarget = in.MountTarget out.PostActivate = *(*[]string)(unsafe.Pointer(&in.PostActivate)) + // WARNING: in.PostMount requires manual conversion: does not exist in peer-type + // WARNING: in.PreUnmount requires manual conversion: does not exist in peer-type out.PreDeactivate = *(*[]string)(unsafe.Pointer(&in.PreDeactivate)) return nil } -// Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines is an autogenerated conversion function. -func Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(in *v1alpha4.NnfStorageProfileLustreCmdLines, out *NnfStorageProfileLustreCmdLines, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(in, out, s) -} - func autoConvert_v1alpha3_NnfStorageProfileLustreData_To_v1alpha4_NnfStorageProfileLustreData(in *NnfStorageProfileLustreData, out *v1alpha4.NnfStorageProfileLustreData, s conversion.Scope) error { out.CombinedMGTMDT = in.CombinedMGTMDT out.ExternalMGS = in.ExternalMGS diff --git a/api/v1alpha4/nnfstorage_types.go b/api/v1alpha4/nnfstorage_types.go index f2577b24..b9ab275c 100644 --- a/api/v1alpha4/nnfstorage_types.go +++ b/api/v1alpha4/nnfstorage_types.go @@ -28,7 +28,8 @@ import ( ) const ( - AllocationSetLabel = "nnf.cray.hpe.com/allocationset" + AllocationSetLabel = "nnf.cray.hpe.com/allocationset" + AllocationSetOST0Label = "nnf.cray.hpe.com/allocationset_ost0" ) // NnfStorageAllocationNodes identifies the node and properties of the allocation to make on that node diff --git a/api/v1alpha4/nnfstorageprofile_types.go b/api/v1alpha4/nnfstorageprofile_types.go index 6adc8b05..6370606e 100644 --- a/api/v1alpha4/nnfstorageprofile_types.go +++ b/api/v1alpha4/nnfstorageprofile_types.go @@ -46,6 +46,16 @@ type NnfStorageProfileLustreCmdLines struct { // Lustre target has 
been activated PostActivate []string `json:"postActivate,omitempty"` + // PostMount specifies a list of commands to run on the Rabbit (Lustre client) after the Lustre + // target is activated. This includes mounting the Lustre filesystem beforehand and unmounting + // it afterward. + PostMount []string `json:"postMount,omitempty"` + + // PreUnmount specifies a list of commands to run on the Rabbit (Lustre client) before the + // Lustre target is deactivated. This includes mounting the Lustre filesystem beforehand and + // unmounting it afterward. + PreUnmount []string `json:"preUnmount,omitempty"` + // PreDeactivate specifies a list of commands to run on the Rabbit before the // Lustre target is deactivated PreDeactivate []string `json:"preDeactivate,omitempty"` @@ -178,16 +188,16 @@ type NnfStorageProfileCmdLines struct { // MountRabbit specifies mount options for mounting on the Rabbit. MountRabbit string `json:"mountRabbit,omitempty"` - // PreDeactivate specifies a list of commands to run on the Rabbit after the - // file system has been activated - PostActivate []string `json:"postActivate,omitempty"` + // PostMount specifies a list of commands to run on the Rabbit after the + // file system has been activated and mounted. + PostMount []string `json:"postMount,omitempty"` // MountCompute specifies mount options for mounting on the Compute. MountCompute string `json:"mountCompute,omitempty"` - // PreDeactivate specifies a list of commands to run on the Rabbit before the - // file system is deactivated - PreDeactivate []string `json:"preDeactivate,omitempty"` + // PreUnmount specifies a list of commands to run on the Rabbit before the + // file system is deactivated and unmounted. 
+ PreUnmount []string `json:"preUnmount,omitempty"` } // NnfStorageProfileLVMVgChangeCmdLines diff --git a/api/v1alpha4/zz_generated.deepcopy.go b/api/v1alpha4/zz_generated.deepcopy.go index a9b084bb..dbbf2b92 100644 --- a/api/v1alpha4/zz_generated.deepcopy.go +++ b/api/v1alpha4/zz_generated.deepcopy.go @@ -1646,13 +1646,13 @@ func (in *NnfStorageProfileCmdLines) DeepCopyInto(out *NnfStorageProfileCmdLines *out = *in out.VgChange = in.VgChange out.LvChange = in.LvChange - if in.PostActivate != nil { - in, out := &in.PostActivate, &out.PostActivate + if in.PostMount != nil { + in, out := &in.PostMount, &out.PostMount *out = make([]string, len(*in)) copy(*out, *in) } - if in.PreDeactivate != nil { - in, out := &in.PreDeactivate, &out.PreDeactivate + if in.PreUnmount != nil { + in, out := &in.PreUnmount, &out.PreUnmount *out = make([]string, len(*in)) copy(*out, *in) } @@ -1778,6 +1778,16 @@ func (in *NnfStorageProfileLustreCmdLines) DeepCopyInto(out *NnfStorageProfileLu *out = make([]string, len(*in)) copy(*out, *in) } + if in.PostMount != nil { + in, out := &in.PostMount, &out.PostMount + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreUnmount != nil { + in, out := &in.PreUnmount, &out.PreUnmount + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.PreDeactivate != nil { in, out := &in.PreDeactivate, &out.PreDeactivate *out = make([]string, len(*in)) diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfstorageprofiles.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfstorageprofiles.yaml index 9ed1addf..adfd59a4 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfstorageprofiles.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfstorageprofiles.yaml @@ -1923,17 +1923,17 @@ spec: description: MountRabbit specifies mount options for mounting on the Rabbit. 
type: string - postActivate: + postMount: description: |- - PreDeactivate specifies a list of commands to run on the Rabbit after the - file system has been activated + PostMount specifies a list of commands to run on the Rabbit after the + file system has been activated and mounted. items: type: string type: array - preDeactivate: + preUnmount: description: |- - PreDeactivate specifies a list of commands to run on the Rabbit before the - file system is deactivated + PreUnmount specifies a list of commands to run on the Rabbit before the + file system is deactivated and unmounted. items: type: string type: array @@ -2041,6 +2041,14 @@ spec: items: type: string type: array + postMount: + description: |- + PostMount specifies a list of commands to run on the Rabbit (Lustre client) after the Lustre + target is activated. This includes mounting the Lustre filesystem beforehand and unmounting + it afterward. + items: + type: string + type: array preDeactivate: description: |- PreDeactivate specifies a list of commands to run on the Rabbit before the @@ -2048,6 +2056,14 @@ spec: items: type: string type: array + preUnmount: + description: |- + PreUnmount specifies a list of commands to run on the Rabbit (Lustre client) before the + Lustre target is deactivated. This includes mounting the Lustre filesystem beforehand and + unmounting it afterward. + items: + type: string + type: array zpoolCreate: description: |- ZpoolCreate specifies the zpool create commandline, minus the "zpool create". @@ -2107,6 +2123,14 @@ spec: items: type: string type: array + postMount: + description: |- + PostMount specifies a list of commands to run on the Rabbit (Lustre client) after the Lustre + target is activated. This includes mounting the Lustre filesystem beforehand and unmounting + it afterward. 
+ items: + type: string + type: array preDeactivate: description: |- PreDeactivate specifies a list of commands to run on the Rabbit before the @@ -2114,6 +2138,14 @@ spec: items: type: string type: array + preUnmount: + description: |- + PreUnmount specifies a list of commands to run on the Rabbit (Lustre client) before the + Lustre target is deactivated. This includes mounting the Lustre filesystem beforehand and + unmounting it afterward. + items: + type: string + type: array zpoolCreate: description: |- ZpoolCreate specifies the zpool create commandline, minus the "zpool create". @@ -2144,6 +2176,14 @@ spec: items: type: string type: array + postMount: + description: |- + PostMount specifies a list of commands to run on the Rabbit (Lustre client) after the Lustre + target is activated. This includes mounting the Lustre filesystem beforehand and unmounting + it afterward. + items: + type: string + type: array preDeactivate: description: |- PreDeactivate specifies a list of commands to run on the Rabbit before the @@ -2151,6 +2191,14 @@ spec: items: type: string type: array + preUnmount: + description: |- + PreUnmount specifies a list of commands to run on the Rabbit (Lustre client) before the + Lustre target is deactivated. This includes mounting the Lustre filesystem beforehand and + unmounting it afterward. + items: + type: string + type: array zpoolCreate: description: |- ZpoolCreate specifies the zpool create commandline, minus the "zpool create". @@ -2248,6 +2296,14 @@ spec: items: type: string type: array + postMount: + description: |- + PostMount specifies a list of commands to run on the Rabbit (Lustre client) after the Lustre + target is activated. This includes mounting the Lustre filesystem beforehand and unmounting + it afterward. 
+ items: + type: string + type: array preDeactivate: description: |- PreDeactivate specifies a list of commands to run on the Rabbit before the @@ -2255,6 +2311,14 @@ spec: items: type: string type: array + preUnmount: + description: |- + PreUnmount specifies a list of commands to run on the Rabbit (Lustre client) before the + Lustre target is deactivated. This includes mounting the Lustre filesystem beforehand and + unmounting it afterward. + items: + type: string + type: array zpoolCreate: description: |- ZpoolCreate specifies the zpool create commandline, minus the "zpool create". @@ -2348,17 +2412,17 @@ spec: description: MountRabbit specifies mount options for mounting on the Rabbit. type: string - postActivate: + postMount: description: |- - PreDeactivate specifies a list of commands to run on the Rabbit after the - file system has been activated + PostMount specifies a list of commands to run on the Rabbit after the + file system has been activated and mounted. items: type: string type: array - preDeactivate: + preUnmount: description: |- - PreDeactivate specifies a list of commands to run on the Rabbit before the - file system is deactivated + PreUnmount specifies a list of commands to run on the Rabbit before the + file system is deactivated and unmounted. items: type: string type: array @@ -2451,17 +2515,17 @@ spec: description: MountRabbit specifies mount options for mounting on the Rabbit. type: string - postActivate: + postMount: description: |- - PreDeactivate specifies a list of commands to run on the Rabbit after the - file system has been activated + PostMount specifies a list of commands to run on the Rabbit after the + file system has been activated and mounted. 
items: type: string type: array - preDeactivate: + preUnmount: description: |- - PreDeactivate specifies a list of commands to run on the Rabbit before the - file system is deactivated + PreUnmount specifies a list of commands to run on the Rabbit before the + file system is deactivated and unmounted. items: type: string type: array diff --git a/config/examples/nnf_nnfstorageprofile.yaml b/config/examples/nnf_nnfstorageprofile.yaml index 74211b41..73b0a676 100644 --- a/config/examples/nnf_nnfstorageprofile.yaml +++ b/config/examples/nnf_nnfstorageprofile.yaml @@ -23,6 +23,8 @@ data: zpoolCreate: -O canmount=off -o cachefile=none $POOL_NAME $DEVICE_LIST mkfs: --ost --backfstype=$BACKFS --fsname=$FS_NAME --mgsnode=$MGS_NID --index=$INDEX $ZVOL_NAME mountTarget: $ZVOL_NAME $MOUNT_PATH + postMount: + - 'lfs setstripe -E 64K -L mdt -E 16m -c 1 -S 16m -E 1G -c 2 -E 4G -c 4 -E 16G -c 8 -E 64G -c 16 -E -1 -c -1 $MOUNT_PATH' ostOptions: scale: 5 colocateComputes: true @@ -52,8 +54,8 @@ data: mkfs: -j2 -p $PROTOCOL -t $CLUSTER_NAME:$LOCK_SPACE $DEVICE mountRabbit: $DEVICE $MOUNT_PATH mountCompute: $DEVICE $MOUNT_PATH - postActivate: - - "chown $USERID:$GROUPID $MOUNT_PATH" + postMount: + - 'chown $USERID:$GROUPID $MOUNT_PATH' xfsStorage: commandlines: sharedVg: true @@ -72,8 +74,8 @@ data: mkfs: $DEVICE mountRabbit: $DEVICE $MOUNT_PATH mountCompute: $DEVICE $MOUNT_PATH - postActivate: - - "chown $USERID:$GROUPID $MOUNT_PATH" + postMount: + - 'chown $USERID:$GROUPID $MOUNT_PATH' rawStorage: commandlines: sharedVg: true @@ -89,5 +91,3 @@ data: activate: --activate y $VG_NAME/$LV_NAME deactivate: --activate n $VG_NAME/$LV_NAME lvRemove: $VG_NAME - - diff --git a/internal/controller/filesystem_helpers.go b/internal/controller/filesystem_helpers.go index e81dc58f..cb176cb3 100644 --- a/internal/controller/filesystem_helpers.go +++ b/internal/controller/filesystem_helpers.go @@ -348,8 +348,8 @@ func newBindFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnf 
fs.TempDir = fmt.Sprintf("/mnt/temp/%s-%d", nnfNodeStorage.Name, index) fs.CommandArgs.Mount = "-o bind $DEVICE $MOUNT_PATH" - fs.CommandArgs.PostActivate = cmdLines.PostActivate - fs.CommandArgs.PreDeactivate = cmdLines.PreDeactivate + fs.CommandArgs.PostMount = cmdLines.PostMount + fs.CommandArgs.PreUnmount = cmdLines.PreUnmount fs.CommandArgs.Vars = map[string]string{ "$USERID": fmt.Sprintf("%d", nnfNodeStorage.Spec.UserID), "$GROUPID": fmt.Sprintf("%d", nnfNodeStorage.Spec.GroupID), @@ -372,8 +372,8 @@ func newGfs2FileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnf } else { fs.CommandArgs.Mount = cmdLines.MountCompute } - fs.CommandArgs.PostActivate = cmdLines.PostActivate - fs.CommandArgs.PreDeactivate = cmdLines.PreDeactivate + fs.CommandArgs.PostMount = cmdLines.PostMount + fs.CommandArgs.PreUnmount = cmdLines.PreUnmount fs.CommandArgs.Mkfs = fmt.Sprintf("-O %s", cmdLines.Mkfs) fs.CommandArgs.Vars = map[string]string{ "$CLUSTER_NAME": nnfNodeStorage.Namespace, @@ -400,8 +400,8 @@ func newXfsFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv } else { fs.CommandArgs.Mount = cmdLines.MountCompute } - fs.CommandArgs.PostActivate = cmdLines.PostActivate - fs.CommandArgs.PreDeactivate = cmdLines.PreDeactivate + fs.CommandArgs.PostMount = cmdLines.PostMount + fs.CommandArgs.PreUnmount = cmdLines.PreUnmount fs.CommandArgs.Mkfs = cmdLines.Mkfs fs.CommandArgs.Vars = map[string]string{ "$USERID": fmt.Sprintf("%d", nnfNodeStorage.Spec.UserID), @@ -427,12 +427,15 @@ func newLustreFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *n fs.MgsAddress = nnfNodeStorage.Spec.LustreStorage.MgsAddress fs.Index = nnfNodeStorage.Spec.LustreStorage.StartIndex + index fs.BackFs = nnfNodeStorage.Spec.LustreStorage.BackFs - fs.CommandArgs.Mkfs = cmdLines.Mkfs fs.CommandArgs.MountTarget = cmdLines.MountTarget fs.CommandArgs.Mount = mountCommand fs.CommandArgs.PostActivate = cmdLines.PostActivate + fs.CommandArgs.PostMount = 
cmdLines.PostMount + fs.CommandArgs.PreUnmount = cmdLines.PreUnmount fs.CommandArgs.PreDeactivate = cmdLines.PreDeactivate + fs.TempDir = fmt.Sprintf("/mnt/temp/%s-%d", nnfNodeStorage.Name, index) + fs.CommandArgs.Vars = map[string]string{ "$USERID": fmt.Sprintf("%d", nnfNodeStorage.Spec.UserID), "$GROUPID": fmt.Sprintf("%d", nnfNodeStorage.Spec.GroupID), diff --git a/internal/controller/integration_test.go b/internal/controller/integration_test.go index 6dc224dc..b36f0b02 100644 --- a/internal/controller/integration_test.go +++ b/internal/controller/integration_test.go @@ -94,7 +94,10 @@ var _ = Describe("Integration Test", func() { }, } - Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(nnfNodeStorage), nnfNodeStorage)).To(Succeed()) + Eventually(func() error { + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(nnfNodeStorage), nnfNodeStorage) + }).Should(Succeed()) + By("Verify that the NnfNodeStorage has a label for the pinned profile") _, err := getPinnedStorageProfileFromLabel(context.TODO(), k8sClient, nnfNodeStorage) Expect(err).ShouldNot(HaveOccurred()) diff --git a/internal/controller/nnf_access_controller.go b/internal/controller/nnf_access_controller.go index 9b85780e..ab206a52 100644 --- a/internal/controller/nnf_access_controller.go +++ b/internal/controller/nnf_access_controller.go @@ -1093,25 +1093,27 @@ func (r *NnfAccessReconciler) getClientMountStatus(ctx context.Context, access * } // Check whether the clientmounts have finished mounting/unmounting + count := 0 for _, clientMount := range clientMounts { if len(clientMount.Status.Mounts) != len(clientMount.Spec.Mounts) { return false, nil } for _, mount := range clientMount.Status.Mounts { + count++ if string(mount.State) != access.Status.State { return false, nil } - if mount.Ready == false { + if !mount.Ready { return false, nil } } } - if len(clientMounts) != len(clientList) { + if count != len(clientList) { if access.GetDeletionTimestamp().IsZero() { - 
log.Info("unexpected number of ClientMounts", "found", len(clientMounts), "expected", len(clientList)) + log.Info("unexpected number of ClientMounts", "found", count, "expected", len(clientList)) } return false, nil } diff --git a/internal/controller/nnf_node_storage_controller.go b/internal/controller/nnf_node_storage_controller.go index e9f27ca2..e40604a1 100644 --- a/internal/controller/nnf_node_storage_controller.go +++ b/internal/controller/nnf_node_storage_controller.go @@ -49,8 +49,6 @@ const ( // prevents the system from deleting the custom resource until the // reconciler has finished using the resource. finalizerNnfNodeStorage = "nnf.cray.hpe.com/nnf_node_storage" - - nnfNodeStorageResourceName = "nnf-node-storage" ) // NnfNodeStorageReconciler contains the elements needed during reconciliation for NnfNodeStorage @@ -216,7 +214,7 @@ func (r *NnfNodeStorageReconciler) Reconcile(ctx context.Context, req ctrl.Reque } for _, allocation := range nnfNodeStorage.Status.Allocations { - if allocation.Ready == false { + if !allocation.Ready { nnfNodeStorage.Status.Ready = false return ctrl.Result{Requeue: true}, nil @@ -259,6 +257,17 @@ func (r *NnfNodeStorageReconciler) deleteAllocation(ctx context.Context, nnfNode log.Info("Activated file system", "allocation", index) } + lustreOST0 := nnfNodeStorage.Spec.FileSystemType == "lustre" && nnfNodeStorage.Spec.LustreStorage.TargetType == "ost" && nnfNodeStorage.Spec.LustreStorage.StartIndex == 0 + if lustreOST0 || nnfNodeStorage.Spec.FileSystemType != "lustre" { + ran, err = fileSystem.PreUnmount(ctx) + if err != nil { + return nil, dwsv1alpha2.NewResourceError("could not run pre unmount for file system").WithError(err).WithMajor() + } + if ran { + log.Info("Pre unmount file system", "allocation", index) + } + } + ran, err = fileSystem.PreDeactivate(ctx) if err != nil { return nil, dwsv1alpha2.NewResourceError("could not run pre deactivate for file system").WithError(err).WithMajor() @@ -363,6 +372,19 @@ func (r 
*NnfNodeStorageReconciler) createAllocations(ctx context.Context, nnfNod log.Info("Post activate file system", "allocation", index) } + // For lustre, PostMount should only happen on OST0 only. For other file systems, just run + // PostMount + lustreOST0 := nnfNodeStorage.Spec.FileSystemType == "lustre" && nnfNodeStorage.Spec.LustreStorage.TargetType == "ost" && nnfNodeStorage.Spec.LustreStorage.StartIndex == 0 + if lustreOST0 || nnfNodeStorage.Spec.FileSystemType != "lustre" { + ran, err = fileSystem.PostMount(ctx, allocationStatus.Ready) + if err != nil { + return nil, dwsv1alpha2.NewResourceError("could not run post mount").WithError(err).WithMajor() + } + if ran { + log.Info("Post mount file system", "allocation", index) + } + } + allocationStatus.Ready = true } diff --git a/internal/controller/nnf_storage_controller.go b/internal/controller/nnf_storage_controller.go index 05e805cb..d9cd6e3e 100644 --- a/internal/controller/nnf_storage_controller.go +++ b/internal/controller/nnf_storage_controller.go @@ -63,10 +63,6 @@ const ( // has finished in using the resource. finalizerNnfStorage = "nnf.cray.hpe.com/nnf_storage" - // ownerAnnotation is a name/namespace pair used on the NnfNodeStorage resources - // for owner information. See nnfNodeStorageMapFunc() below. - ownerAnnotation = "nnf.cray.hpe.com/owner" - // Minimum size of lustre allocation sizes. If a user requests less than this, then the capacity // is set to this value. 
minimumLustreAllocationSizeInBytes = 4000000000 @@ -218,7 +214,7 @@ func (r *NnfStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) // Collect status information from the NnfNodeStorage resources and aggregate it into the // NnfStorage for i := range storage.Spec.AllocationSets { - res, err := r.aggregateNodeStorageStatus(ctx, storage, i, false) + res, err := r.aggregateNodeStorageStatus(ctx, storage, i, false, false) if err != nil { return ctrl.Result{}, err } @@ -234,12 +230,12 @@ func (r *NnfStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) // Wait for all the allocation sets to be ready for _, allocationSet := range storage.Status.AllocationSets { - if allocationSet.Ready == false { + if !allocationSet.Ready { return ctrl.Result{}, nil } } - if storage.Spec.FileSystemType == "lustre" && storage.Status.Ready == false { + if storage.Spec.FileSystemType == "lustre" && !storage.Status.Ready { res, err := r.setLustreOwnerGroup(ctx, storage) if err != nil { return ctrl.Result{}, err @@ -470,7 +466,7 @@ func (r *NnfStorageReconciler) aggregateNodeBlockStorageStatus(ctx context.Conte } for _, nnfNodeBlockStorage := range nnfNodeBlockStorages { - if nnfNodeBlockStorage.Status.Ready == false { + if !nnfNodeBlockStorage.Status.Ready { return &ctrl.Result{}, nil } } @@ -503,7 +499,7 @@ func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *n if allocationSet.TargetType == "mgt" || allocationSet.TargetType == "mgtmdt" { // Wait for the MGT to be set up before creating nnfnodestorages for the other allocation sets if allocationSetIndex != i { - if storage.Status.AllocationSets[i].Ready == false { + if !storage.Status.AllocationSets[i].Ready { return nil, nil } } @@ -573,6 +569,31 @@ func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *n } allocationSet := storage.Spec.AllocationSets[allocationSetIndex] + lustreOST := storage.Spec.FileSystemType == "lustre" && allocationSet.TargetType == "ost" 
+ + // When creating lustre filesystems, we want to create Lustre OST0 last so we can signal to the + // NnfNodeStorage controller when it is OK to run PostMount commands. OST0 should be created + // last and only when all of the other NnfNodeStorage for each allocation sets is ready. Until + // those are ready, skip the creation of OST0. + skipOST0 := false + if lustreOST { + for i := range storage.Spec.AllocationSets { + res, err := r.aggregateNodeStorageStatus(ctx, storage, i, false, true) + if err != nil { + return &ctrl.Result{}, err + } + + if res != nil { + if *res == (ctrl.Result{}) { + skipOST0 = true // not ready, skip OST0 + continue + } else { + return res, nil + } + } + } + } + startIndex := 0 for i, node := range allocationSet.Nodes { // Per Rabbit namespace. @@ -583,6 +604,12 @@ func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *n }, } + // Do not create lustre OST0 until all other NnfNodeStorages are ready + if lustreOST && startIndex == 0 && skipOST0 { + startIndex += node.Count + continue + } + result, err := ctrl.CreateOrUpdate(ctx, r.Client, nnfNodeStorage, func() error { dwsv1alpha2.InheritParentLabels(nnfNodeStorage, storage) @@ -590,6 +617,9 @@ func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *n labels := nnfNodeStorage.GetLabels() labels[nnfv1alpha4.AllocationSetLabel] = allocationSet.Name + if lustreOST && startIndex == 0 { + labels[nnfv1alpha4.AllocationSetOST0Label] = "true" + } nnfNodeStorage.SetLabels(labels) nnfNodeStorage.Spec.BlockReference = corev1.ObjectReference{ @@ -641,9 +671,11 @@ func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *n } // Get the status from all the child NnfNodeStorage resources and use them to build the status -// for the NnfStorage. 
-func (r *NnfStorageReconciler) aggregateNodeStorageStatus(ctx context.Context, storage *nnfv1alpha4.NnfStorage, allocationSetIndex int, deleting bool) (*ctrl.Result, error) { +// for the NnfStorage. When skipOST0 is set, expect 1 less NnfNodeStorage resource when processing +// allocationSets for Lustre OST. +func (r *NnfStorageReconciler) aggregateNodeStorageStatus(ctx context.Context, storage *nnfv1alpha4.NnfStorage, allocationSetIndex int, deleting, skipOST0 bool) (*ctrl.Result, error) { log := r.Log.WithValues("NnfStorage", types.NamespacedName{Name: storage.Name, Namespace: storage.Namespace}) + lustreOST := storage.Spec.FileSystemType == "lustre" && storage.Spec.AllocationSets[allocationSetIndex].TargetType == "ost" nnfNodeStorageList := &nnfv1alpha4.NnfNodeStorageList{} matchLabels := dwsv1alpha2.MatchingOwner(storage) @@ -706,7 +738,7 @@ func (r *NnfStorageReconciler) aggregateNodeStorageStatus(ctx context.Context, s } for _, nnfNodeStorage := range nnfNodeStorages { - if nnfNodeStorage.Status.Ready == false { + if !nnfNodeStorage.Status.Ready { return &ctrl.Result{}, nil } } @@ -714,9 +746,18 @@ func (r *NnfStorageReconciler) aggregateNodeStorageStatus(ctx context.Context, s // Ensure that we found all the NnfNodeStorage resources we were expecting. This can be expected // transiently as it takes time for the client cache to be updated. Log a message in case the count // never reaches the expected value. - if len(nnfNodeStorages) != len(storage.Spec.AllocationSets[allocationSetIndex].Nodes) { + found := len(nnfNodeStorages) + expected := len(storage.Spec.AllocationSets[allocationSetIndex].Nodes) + + // In the Lustre OST0 case, the NnfNodeStorage has not been created yet, so we can safely expect + // 1 less than the total number of OSTs. 
+ if lustreOST && skipOST0 { + expected = expected - 1 + } + + if found != expected { if storage.GetDeletionTimestamp().IsZero() { - log.Info("unexpected number of NnfNodeStorages", "found", len(nnfNodeStorages), "expected", len(storage.Spec.AllocationSets[allocationSetIndex].Nodes)) + log.Info("unexpected number of NnfNodeStorages", "found", found, "expected", expected) } return &ctrl.Result{}, nil } @@ -901,21 +942,16 @@ func (r *NnfStorageReconciler) setLustreOwnerGroup(ctx context.Context, nnfStora return &ctrl.Result{}, dwsv1alpha2.NewResourceError("zero length node array for OST").WithFatal() } - tempMountDir := os.Getenv("NNF_TEMP_MOUNT_PATH") - if len(tempMountDir) == 0 { - tempMountDir = "/mnt/tmp/" - } - dwsv1alpha2.InheritParentLabels(clientMount, nnfStorage) dwsv1alpha2.AddOwnerLabels(clientMount, nnfStorage) clientMount.Spec.Node = allocationSet.Nodes[0].Name clientMount.Spec.DesiredState = dwsv1alpha2.ClientMountStateMounted clientMount.Spec.Mounts = []dwsv1alpha2.ClientMountInfo{ - dwsv1alpha2.ClientMountInfo{ + { Type: nnfStorage.Spec.FileSystemType, TargetType: "directory", - MountPath: fmt.Sprintf("/%s/%s", tempMountDir, nnfNodeStorageName(nnfStorage, index, 0)), + MountPath: getTempClientMountDir(nnfStorage, index), Device: dwsv1alpha2.ClientMountDevice{ Type: dwsv1alpha2.ClientMountDeviceTypeLustre, Lustre: &dwsv1alpha2.ClientMountDeviceLustre{ @@ -955,7 +991,7 @@ func (r *NnfStorageReconciler) setLustreOwnerGroup(ctx context.Context, nnfStora switch clientMount.Status.Mounts[0].State { case dwsv1alpha2.ClientMountStateMounted: - if clientMount.Status.Mounts[0].Ready == false { + if !clientMount.Status.Mounts[0].Ready { return &ctrl.Result{}, nil } @@ -972,7 +1008,7 @@ func (r *NnfStorageReconciler) setLustreOwnerGroup(ctx context.Context, nnfStora return &ctrl.Result{}, nil case dwsv1alpha2.ClientMountStateUnmounted: - if clientMount.Status.Mounts[0].Ready == false { + if !clientMount.Status.Mounts[0].Ready { return &ctrl.Result{}, nil } @@ 
-983,6 +1019,18 @@ func (r *NnfStorageReconciler) setLustreOwnerGroup(ctx context.Context, nnfStora return &ctrl.Result{}, nil } +func getTempMountDir() string { + tempMountDir := os.Getenv("NNF_TEMP_MOUNT_PATH") + if len(tempMountDir) == 0 { + tempMountDir = "/mnt/tmp/" + } + return tempMountDir +} + +func getTempClientMountDir(nnfStorage *nnfv1alpha4.NnfStorage, index int) string { + return fmt.Sprintf("/%s/%s", getTempMountDir(), nnfNodeStorageName(nnfStorage, index, 0)) +} + // Get the status from all the child NnfNodeStorage resources and use them to build the status // for the NnfStorage. func (r *NnfStorageReconciler) aggregateClientMountStatus(ctx context.Context, storage *nnfv1alpha4.NnfStorage, deleting bool) error { @@ -1031,12 +1079,32 @@ func (r *NnfStorageReconciler) teardownStorage(ctx context.Context, storage *nnf } if storage.Spec.FileSystemType == "lustre" { - // Delete the OSTs and MDTs first so we can drop the claim on the NnfLustreMgt resource. This will trigger - // an lctl command to run to remove the fsname from the MGT. childObjects := []dwsv1alpha2.ObjectList{ &nnfv1alpha4.NnfNodeStorageList{}, } + // Delete OST0 first so that PreUnmount commands can happen + ost0DeleteStatus, err := dwsv1alpha2.DeleteChildrenWithLabels(ctx, r.Client, childObjects, storage, client.MatchingLabels{nnfv1alpha4.AllocationSetOST0Label: "true"}) + if err != nil { + return nodeStoragesExist, err + } + + // Collect status information from the NnfNodeStorage resources and aggregate it into the + // NnfStorage + for i := range storage.Status.AllocationSets { + _, err := r.aggregateNodeStorageStatus(ctx, storage, i, true, false) + if err != nil { + return nodeStoragesExist, err + } + } + + // Ensure OST0 is deleted before continuing + if !ost0DeleteStatus.Complete() { + return nodeStoragesExist, nil + } + + // Then, delete the rest of the OSTs and MDTs so we can drop the claim on the NnfLustreMgt + // resource. 
This will trigger an lctl command to run to remove the fsname from the MGT. ostDeleteStatus, err := dwsv1alpha2.DeleteChildrenWithLabels(ctx, r.Client, childObjects, storage, client.MatchingLabels{nnfv1alpha4.AllocationSetLabel: "ost"}) if err != nil { return nodeStoragesExist, err @@ -1050,7 +1118,7 @@ func (r *NnfStorageReconciler) teardownStorage(ctx context.Context, storage *nnf // Collect status information from the NnfNodeStorage resources and aggregate it into the // NnfStorage for i := range storage.Status.AllocationSets { - _, err := r.aggregateNodeStorageStatus(ctx, storage, i, true) + _, err := r.aggregateNodeStorageStatus(ctx, storage, i, true, false) if err != nil { return nodeStoragesExist, err } @@ -1091,7 +1159,7 @@ func (r *NnfStorageReconciler) teardownStorage(ctx context.Context, storage *nnf // Collect status information from the NnfNodeStorage resources and aggregate it into the // NnfStorage for i := range storage.Status.AllocationSets { - _, err := r.aggregateNodeStorageStatus(ctx, storage, i, true) + _, err := r.aggregateNodeStorageStatus(ctx, storage, i, true, false) if err != nil { return nodeStoragesExist, err } @@ -1176,6 +1244,34 @@ func nnfNodeStorageName(storage *nnfv1alpha4.NnfStorage, allocationSetIndex int, return storage.Namespace + "-" + storage.Name + "-" + storage.Spec.AllocationSets[allocationSetIndex].Name + "-" + strconv.Itoa(duplicateRabbitIndex) } +// Get the NnfNodeStorage for Lustre OST0 for a given NnfStorage +func (r *NnfStorageReconciler) getLustreOST0(ctx context.Context, storage *nnfv1alpha4.NnfStorage) (*nnfv1alpha4.NnfNodeStorage, error) { + if storage.Spec.FileSystemType != "lustre" { + return nil, nil + } + + // Get al the NnfNodeStorages for the OSTs + nnfNodeStorageList := &nnfv1alpha4.NnfNodeStorageList{} + matchLabels := dwsv1alpha2.MatchingOwner(storage) + matchLabels[nnfv1alpha4.AllocationSetLabel] = "ost" + + listOptions := []client.ListOption{ + matchLabels, + } + + if err := r.List(ctx, 
nnfNodeStorageList, listOptions...); err != nil { + return nil, dwsv1alpha2.NewResourceError("could not list NnfNodeStorages").WithError(err) + } + + for _, nnfNodeStorage := range nnfNodeStorageList.Items { + if nnfNodeStorage.Spec.LustreStorage.StartIndex == 0 { + return &nnfNodeStorage, nil + } + } + + return nil, nil +} + // SetupWithManager sets up the controller with the Manager. func (r *NnfStorageReconciler) SetupWithManager(mgr ctrl.Manager) error { r.ChildObjects = []dwsv1alpha2.ObjectList{ diff --git a/pkg/filesystem/filesystem.go b/pkg/filesystem/filesystem.go index a2b2d9f1..193dee94 100644 --- a/pkg/filesystem/filesystem.go +++ b/pkg/filesystem/filesystem.go @@ -45,4 +45,10 @@ type FileSystem interface { // Run any commands against the activated file system before it is deactivated PreDeactivate(ctx context.Context) (bool, error) + + // Run any commands against the file system after it has been mounted + PostMount(ctx context.Context, complete bool) (bool, error) + + // Run any commands against the file system before it is unmounted + PreUnmount(ctx context.Context) (bool, error) } diff --git a/pkg/filesystem/kind.go b/pkg/filesystem/kind.go index 6905926e..7aae05ba 100644 --- a/pkg/filesystem/kind.go +++ b/pkg/filesystem/kind.go @@ -1,5 +1,5 @@ /* - * Copyright 2023 Hewlett Packard Enterprise Development LP + * Copyright 2023-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. 
* * The entirety of this work is licensed under the Apache License, @@ -39,7 +39,7 @@ type KindFileSystem struct { var _ FileSystem = &KindFileSystem{} func (m *KindFileSystem) Create(ctx context.Context, complete bool) (bool, error) { - if complete == true { + if complete { return false, nil } @@ -60,7 +60,7 @@ func (m *KindFileSystem) Destroy(ctx context.Context) (bool, error) { } func (m *KindFileSystem) Activate(ctx context.Context, complete bool) (bool, error) { - if complete == true { + if complete { return false, nil } @@ -74,7 +74,7 @@ func (m *KindFileSystem) Deactivate(ctx context.Context) (bool, error) { } func (m *KindFileSystem) Mount(ctx context.Context, path string, complete bool) (bool, error) { - if complete == true { + if complete { return false, nil } @@ -95,7 +95,7 @@ func (m *KindFileSystem) Unmount(ctx context.Context, path string) (bool, error) } func (m *KindFileSystem) PostActivate(ctx context.Context, complete bool) (bool, error) { - if complete == true { + if complete { return false, nil } @@ -109,3 +109,19 @@ func (m *KindFileSystem) PreDeactivate(ctx context.Context) (bool, error) { return true, nil } + +func (m *KindFileSystem) PostMount(ctx context.Context, complete bool) (bool, error) { + if complete { + return false, nil + } + + m.Log.Info("Ran PostMount") + + return true, nil +} + +func (m *KindFileSystem) PreUnmount(ctx context.Context) (bool, error) { + m.Log.Info("Ran PreUnmount") + + return true, nil +} diff --git a/pkg/filesystem/lustre.go b/pkg/filesystem/lustre.go index 6e545989..d705c351 100644 --- a/pkg/filesystem/lustre.go +++ b/pkg/filesystem/lustre.go @@ -1,5 +1,5 @@ /* - * Copyright 2023 Hewlett Packard Enterprise Development LP + * Copyright 2023-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. 
* * The entirety of this work is licensed under the Apache License, @@ -40,6 +40,8 @@ type LustreFileSystemCommandArgs struct { Mount string PostActivate []string PreDeactivate []string + PostMount []string + PreUnmount []string Vars map[string]string } @@ -54,6 +56,7 @@ type LustreFileSystem struct { MgsAddress string Index int BackFs string + TempDir string BlockDevice blockdevice.BlockDevice } @@ -82,7 +85,7 @@ func (l *LustreFileSystem) parseArgs(args string) string { } func (l *LustreFileSystem) Create(ctx context.Context, complete bool) (bool, error) { - if complete == true { + if complete { return false, nil } @@ -212,8 +215,9 @@ func (l *LustreFileSystem) Mount(ctx context.Context, path string, complete bool } // Found an existing mount at this path. Check if it's the mount we expect - if m.Type != "lustre" { - return false, fmt.Errorf("unexpected mount at path %s. Device %s type %s", path, m.Device, m.Type) + devStr := fmt.Sprintf("%s:/%s", l.MgsAddress, l.Name) + if m.Device != devStr || m.Type != "lustre" { + return false, fmt.Errorf("unexpected mount at path %s. Expected device %s of type lustre, found device %s type %s", path, devStr, m.Device, m.Type) } // The file system is already mounted. Nothing left to do @@ -254,8 +258,9 @@ func (l *LustreFileSystem) Unmount(ctx context.Context, path string) (bool, erro } // Found an existing mount at this path. Check if it's the mount we expect - if m.Device != fmt.Sprintf("%s:/%s", l.MgsAddress, l.Name) || m.Type != "lustre" { - return false, fmt.Errorf("unexpected mount at path %s. Device %s type %s", path, m.Device, m.Type) + devStr := fmt.Sprintf("%s:/%s", l.MgsAddress, l.Name) + if m.Device != devStr || m.Type != "lustre" { + return false, fmt.Errorf("unexpected mount at path %s. 
Expected device %s of type lustre, found device %s type %s", path, devStr, m.Device, m.Type) } if _, err := command.Run(fmt.Sprintf("umount %s", path), l.Log); err != nil { @@ -311,5 +316,84 @@ func (l *LustreFileSystem) PreDeactivate(ctx context.Context) (bool, error) { } } - return false, nil + return true, nil +} + +func (l *LustreFileSystem) PostMount(ctx context.Context, complete bool) (bool, error) { + if len(l.CommandArgs.PostMount) == 0 { + return false, nil + } + + if complete { + return false, nil + } + + if l.TargetType == "none" { + return false, nil + } + + if _, err := l.Mount(ctx, l.TempDir, false); err != nil { + return false, fmt.Errorf("could not mount temp dir '%s' for post mount: %w", l.TempDir, err) + } + + // Build the commands from the args provided + if l.CommandArgs.Vars == nil { + l.CommandArgs.Vars = make(map[string]string) + } + l.CommandArgs.Vars["$MOUNT_PATH"] = filepath.Clean(l.TempDir) + + for _, rawCommand := range l.CommandArgs.PostMount { + formattedCommand := l.parseArgs(rawCommand) + l.Log.Info("PostMount", "command", formattedCommand) + + if _, err := command.Run(formattedCommand, l.Log); err != nil { + if _, unmountErr := l.Unmount(ctx, l.TempDir); unmountErr != nil { + return false, fmt.Errorf("could not unmount after post mount command failed: %s: %w", formattedCommand, unmountErr) + } + return false, fmt.Errorf("could not run post mount command: %s: %w", formattedCommand, err) + } + } + + if _, err := l.Unmount(ctx, l.TempDir); err != nil { + return false, fmt.Errorf("could not unmount after post mount '%s': %w", l.TempDir, err) + } + + return true, nil +} + +func (l *LustreFileSystem) PreUnmount(ctx context.Context) (bool, error) { + if len(l.CommandArgs.PreUnmount) == 0 { + return false, nil + } + + if l.TargetType == "none" { + return false, nil + } + + if _, err := l.Mount(ctx, l.TempDir, false); err != nil { + return false, fmt.Errorf("could not mount temp dir '%s' for pre unmount: %w", l.TempDir, err) + } + // Build 
the commands from the args provided + if l.CommandArgs.Vars == nil { + l.CommandArgs.Vars = make(map[string]string) + } + l.CommandArgs.Vars["$MOUNT_PATH"] = filepath.Clean(l.TempDir) + + for _, rawCommand := range l.CommandArgs.PreUnmount { + formattedCommand := l.parseArgs(rawCommand) + l.Log.Info("PreUnmount", "command", formattedCommand) + + if _, err := command.Run(formattedCommand, l.Log); err != nil { + if _, unmountErr := l.Unmount(ctx, l.TempDir); unmountErr != nil { + return false, fmt.Errorf("could not unmount after pre unmount command failed: %s: %w", formattedCommand, unmountErr) + } + return false, fmt.Errorf("could not run pre unmount command: %s: %w", formattedCommand, err) + } + } + + if _, err := l.Unmount(ctx, l.TempDir); err != nil { + return false, fmt.Errorf("could not unmount after pre unmount '%s': %w", l.TempDir, err) + } + + return true, nil } diff --git a/pkg/filesystem/mock.go b/pkg/filesystem/mock.go index ed10a88f..6ec1df71 100644 --- a/pkg/filesystem/mock.go +++ b/pkg/filesystem/mock.go @@ -1,5 +1,5 @@ /* - * Copyright 2023 Hewlett Packard Enterprise Development LP + * Copyright 2023-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. 
* * The entirety of this work is licensed under the Apache License, @@ -37,7 +37,7 @@ type MockFileSystem struct { var _ FileSystem = &MockFileSystem{} func (m *MockFileSystem) Create(ctx context.Context, complete bool) (bool, error) { - if complete == true { + if complete { return false, nil } @@ -52,7 +52,7 @@ func (m *MockFileSystem) Destroy(ctx context.Context) (bool, error) { } func (m *MockFileSystem) Activate(ctx context.Context, complete bool) (bool, error) { - if complete == true { + if complete { return false, nil } @@ -67,7 +67,7 @@ func (m *MockFileSystem) Deactivate(ctx context.Context) (bool, error) { } func (m *MockFileSystem) Mount(ctx context.Context, path string, complete bool) (bool, error) { - if complete == true { + if complete { return false, nil } @@ -82,7 +82,7 @@ func (m *MockFileSystem) Unmount(ctx context.Context, path string) (bool, error) } func (m *MockFileSystem) PostActivate(ctx context.Context, complete bool) (bool, error) { - if complete == true { + if complete { return false, nil } @@ -96,3 +96,19 @@ func (m *MockFileSystem) PreDeactivate(ctx context.Context) (bool, error) { return true, nil } + +func (m *MockFileSystem) PostMount(ctx context.Context, complete bool) (bool, error) { + if complete { + return false, nil + } + + m.Log.Info("Ran PostMount") + + return true, nil +} + +func (m *MockFileSystem) PreUnmount(ctx context.Context) (bool, error) { + m.Log.Info("Ran PreUnmount") + + return true, nil +} diff --git a/pkg/filesystem/simple.go b/pkg/filesystem/simple.go index 1133daa4..2e98de28 100644 --- a/pkg/filesystem/simple.go +++ b/pkg/filesystem/simple.go @@ -1,5 +1,5 @@ /* - * Copyright 2023 Hewlett Packard Enterprise Development LP + * Copyright 2023-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. 
* * The entirety of this work is licensed under the Apache License, @@ -34,10 +34,10 @@ import ( ) type SimpleFileSystemCommandArgs struct { - Mkfs string - Mount string - PostActivate []string - PreDeactivate []string + Mkfs string + Mount string + PostMount []string + PreUnmount []string Vars map[string]string } @@ -198,8 +198,8 @@ func (f *SimpleFileSystem) Unmount(ctx context.Context, path string) (bool, erro return false, nil } -func (f *SimpleFileSystem) PostActivate(ctx context.Context, complete bool) (bool, error) { - if len(f.CommandArgs.PostActivate) == 0 { +func (f *SimpleFileSystem) PostMount(ctx context.Context, complete bool) (bool, error) { + if len(f.CommandArgs.PostMount) == 0 { return false, nil } @@ -212,7 +212,7 @@ func (f *SimpleFileSystem) PostActivate(ctx context.Context, complete bool) (boo } if _, err := f.Mount(ctx, f.TempDir, false); err != nil { - return false, fmt.Errorf("could not mount temp dir '%s': %w", f.TempDir, err) + return false, fmt.Errorf("could not mount temp dir '%s' for post mount: %w", f.TempDir, err) } // Build the commands from the args provided @@ -221,9 +221,9 @@ func (f *SimpleFileSystem) PostActivate(ctx context.Context, complete bool) (boo } f.CommandArgs.Vars["$MOUNT_PATH"] = f.TempDir - for _, rawCommand := range f.CommandArgs.PostActivate { + for _, rawCommand := range f.CommandArgs.PostMount { formattedCommand := f.parseArgs(rawCommand) - f.Log.Info("PostActivate", "command", formattedCommand) + f.Log.Info("PostMount", "command", formattedCommand) if _, err := command.Run(formattedCommand, f.Log); err != nil { if _, unmountErr := f.Unmount(ctx, f.TempDir); unmountErr != nil { @@ -237,11 +237,11 @@ func (f *SimpleFileSystem) PostActivate(ctx context.Context, complete bool) (boo return false, fmt.Errorf("could not unmount after post activate '%s': %w", f.TempDir, err) } - return false, nil + return true, nil } -func (f *SimpleFileSystem) PreDeactivate(ctx context.Context) (bool, error) { - if 
len(f.CommandArgs.PreDeactivate) == 0 { +func (f *SimpleFileSystem) PreUnmount(ctx context.Context) (bool, error) { + if len(f.CommandArgs.PreUnmount) == 0 { return false, nil } @@ -250,7 +250,7 @@ func (f *SimpleFileSystem) PreDeactivate(ctx context.Context) (bool, error) { } if _, err := f.Mount(ctx, f.TempDir, false); err != nil { - return false, fmt.Errorf("could not mount temp dir '%s': %w", f.TempDir, err) + return false, fmt.Errorf("could not mount temp dir '%s' for pre unmount: %w", f.TempDir, err) } // Build the commands from the args provided @@ -259,21 +259,31 @@ func (f *SimpleFileSystem) PreDeactivate(ctx context.Context) (bool, error) { } f.CommandArgs.Vars["$MOUNT_PATH"] = f.TempDir - for _, rawCommand := range f.CommandArgs.PreDeactivate { + for _, rawCommand := range f.CommandArgs.PreUnmount { formattedCommand := f.parseArgs(rawCommand) - f.Log.Info("PreDeactivate", "command", formattedCommand) + f.Log.Info("PreUnmount", "command", formattedCommand) if _, err := command.Run(formattedCommand, f.Log); err != nil { if _, unmountErr := f.Unmount(ctx, f.TempDir); unmountErr != nil { - return false, fmt.Errorf("could not unmount after pre-deactivate command failed: %s: %w", formattedCommand, unmountErr) + return false, fmt.Errorf("could not unmount after pre-unmount command failed: %s: %w", formattedCommand, unmountErr) } - return false, fmt.Errorf("could not run pre-deactivate command: %s: %w", formattedCommand, err) + return false, fmt.Errorf("could not run pre-unmount command: %s: %w", formattedCommand, err) } } if _, err := f.Unmount(ctx, f.TempDir); err != nil { - return false, fmt.Errorf("could not unmount after pre-deactivate '%s': %w", f.TempDir, err) + return false, fmt.Errorf("could not unmount after pre-unmount'%s': %w", f.TempDir, err) } + return true, nil +} + +// PostActivate is not supported for simple filesystems +func (f *SimpleFileSystem) PostActivate(ctx context.Context, complete bool) (bool, error) { + return false, nil +} + +// 
PreDeactivate is not supported for simple filesystems +func (f *SimpleFileSystem) PreDeactivate(ctx context.Context) (bool, error) { return false, nil } From 5bf3a0eee9b906178ce4ca1833ed21fecc8c407d Mon Sep 17 00:00:00 2001 From: matthew-richerson <82597529+matthew-richerson@users.noreply.github.com> Date: Wed, 20 Nov 2024 11:51:33 -0600 Subject: [PATCH 11/23] Add shared option to NnfSystemStorage (#417) * Add shared option to NnfSystemStorage The shared option determines whether to create a single allocation per Rabbit that is shared between all the computes on the Rabbit, or one allocation per compute node. Signed-off-by: Matt Richerson * default to shared=true for v1alpha2 and v1alpha3 Signed-off-by: Matt Richerson --------- Signed-off-by: Matt Richerson --- api/v1alpha1/conversion.go | 2 + api/v1alpha1/zz_generated.conversion.go | 1 + api/v1alpha2/conversion.go | 13 +++++- api/v1alpha2/zz_generated.conversion.go | 40 +++++++++++++------ api/v1alpha3/conversion.go | 13 +++++- api/v1alpha3/zz_generated.conversion.go | 40 +++++++++++++------ api/v1alpha4/nnfsystemstorage_types.go | 5 +++ .../nnf.cray.hpe.com_nnfsystemstorages.yaml | 7 ++++ .../controller/nnfsystemstorage_controller.go | 23 +++++++++-- .../nnfsystemstorage_controller_test.go | 4 ++ 10 files changed, 119 insertions(+), 29 deletions(-) diff --git a/api/v1alpha1/conversion.go b/api/v1alpha1/conversion.go index 72cbf1c1..7ccca516 100644 --- a/api/v1alpha1/conversion.go +++ b/api/v1alpha1/conversion.go @@ -494,8 +494,10 @@ func (src *NnfSystemStorage) ConvertTo(dstRaw conversion.Hub) error { // Otherwise, you may comment out UnmarshalData() until it's needed. 
if hasAnno { dst.Spec.ExcludeDisabledRabbits = restored.Spec.ExcludeDisabledRabbits + dst.Spec.Shared = restored.Spec.Shared } else { dst.Spec.ExcludeDisabledRabbits = false + dst.Spec.Shared = true } return nil diff --git a/api/v1alpha1/zz_generated.conversion.go b/api/v1alpha1/zz_generated.conversion.go index 015dc2a5..aede0f37 100644 --- a/api/v1alpha1/zz_generated.conversion.go +++ b/api/v1alpha1/zz_generated.conversion.go @@ -3200,6 +3200,7 @@ func autoConvert_v1alpha4_NnfSystemStorageSpec_To_v1alpha1_NnfSystemStorageSpec( out.ComputesPattern = *(*[]int)(unsafe.Pointer(&in.ComputesPattern)) out.Capacity = in.Capacity out.Type = in.Type + // WARNING: in.Shared requires manual conversion: does not exist in peer-type out.StorageProfile = in.StorageProfile out.MakeClientMounts = in.MakeClientMounts out.ClientMountPath = in.ClientMountPath diff --git a/api/v1alpha2/conversion.go b/api/v1alpha2/conversion.go index 4fb0531b..3825f60b 100644 --- a/api/v1alpha2/conversion.go +++ b/api/v1alpha2/conversion.go @@ -484,13 +484,20 @@ func (src *NnfSystemStorage) ConvertTo(dstRaw conversion.Hub) error { // Manually restore data. restored := &nnfv1alpha4.NnfSystemStorage{} - if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + hasAnno, err := utilconversion.UnmarshalData(src, restored) + if err != nil { return err } // EDIT THIS FUNCTION! If the annotation is holding anything that is // hub-specific then copy it into 'dst' from 'restored'. // Otherwise, you may comment out UnmarshalData() until it's needed. 
+ if hasAnno { + dst.Spec.Shared = restored.Spec.Shared + } else { + dst.Spec.Shared = true + } + return nil } @@ -637,3 +644,7 @@ func Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfileCmd func Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines(in *nnfv1alpha4.NnfStorageProfileLustreCmdLines, out *NnfStorageProfileLustreCmdLines, s apiconversion.Scope) error { return autoConvert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProfileLustreCmdLines(in, out, s) } + +func Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec(in *nnfv1alpha4.NnfSystemStorageSpec, out *NnfSystemStorageSpec, s apiconversion.Scope) error { + return autoConvert_v1alpha4_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec(in, out, s) +} diff --git a/api/v1alpha2/zz_generated.conversion.go b/api/v1alpha2/zz_generated.conversion.go index 73545eb2..c90daa08 100644 --- a/api/v1alpha2/zz_generated.conversion.go +++ b/api/v1alpha2/zz_generated.conversion.go @@ -848,11 +848,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfSystemStorageSpec)(nil), (*NnfSystemStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec(a.(*v1alpha4.NnfSystemStorageSpec), b.(*NnfSystemStorageSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*NnfSystemStorageStatus)(nil), (*v1alpha4.NnfSystemStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha2_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus(a.(*NnfSystemStorageStatus), b.(*v1alpha4.NnfSystemStorageStatus), scope) }); err != nil { @@ -873,6 +868,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := 
s.AddConversionFunc((*v1alpha4.NnfSystemStorageSpec)(nil), (*NnfSystemStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec(a.(*v1alpha4.NnfSystemStorageSpec), b.(*NnfSystemStorageSpec), scope) + }); err != nil { + return err + } return nil } @@ -3128,7 +3128,17 @@ func Convert_v1alpha4_NnfSystemStorage_To_v1alpha2_NnfSystemStorage(in *v1alpha4 func autoConvert_v1alpha2_NnfSystemStorageList_To_v1alpha4_NnfSystemStorageList(in *NnfSystemStorageList, out *v1alpha4.NnfSystemStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha4.NnfSystemStorage)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1alpha4.NnfSystemStorage, len(*in)) + for i := range *in { + if err := Convert_v1alpha2_NnfSystemStorage_To_v1alpha4_NnfSystemStorage(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -3139,7 +3149,17 @@ func Convert_v1alpha2_NnfSystemStorageList_To_v1alpha4_NnfSystemStorageList(in * func autoConvert_v1alpha4_NnfSystemStorageList_To_v1alpha2_NnfSystemStorageList(in *v1alpha4.NnfSystemStorageList, out *NnfSystemStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]NnfSystemStorage)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfSystemStorage, len(*in)) + for i := range *in { + if err := Convert_v1alpha4_NnfSystemStorage_To_v1alpha2_NnfSystemStorage(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -3181,17 +3201,13 @@ func autoConvert_v1alpha4_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec( out.ComputesPattern = *(*[]int)(unsafe.Pointer(&in.ComputesPattern)) out.Capacity = in.Capacity out.Type = in.Type + // WARNING: in.Shared requires manual conversion: does not exist in 
peer-type out.StorageProfile = in.StorageProfile out.MakeClientMounts = in.MakeClientMounts out.ClientMountPath = in.ClientMountPath return nil } -// Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec is an autogenerated conversion function. -func Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec(in *v1alpha4.NnfSystemStorageSpec, out *NnfSystemStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec(in, out, s) -} - func autoConvert_v1alpha2_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus(in *NnfSystemStorageStatus, out *v1alpha4.NnfSystemStorageStatus, s conversion.Scope) error { out.Ready = in.Ready out.ResourceError = in.ResourceError diff --git a/api/v1alpha3/conversion.go b/api/v1alpha3/conversion.go index 504ad723..32a98544 100644 --- a/api/v1alpha3/conversion.go +++ b/api/v1alpha3/conversion.go @@ -491,13 +491,20 @@ func (src *NnfSystemStorage) ConvertTo(dstRaw conversion.Hub) error { // Manually restore data. restored := &nnfv1alpha4.NnfSystemStorage{} - if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + hasAnno, err := utilconversion.UnmarshalData(src, restored) + if err != nil { return err } // EDIT THIS FUNCTION! If the annotation is holding anything that is // hub-specific then copy it into 'dst' from 'restored'. // Otherwise, you may comment out UnmarshalData() until it's needed. 
+ if hasAnno { + dst.Spec.Shared = restored.Spec.Shared + } else { + dst.Spec.Shared = true + } + return nil } @@ -651,3 +658,7 @@ func Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmd func Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(in *nnfv1alpha4.NnfStorageProfileLustreCmdLines, out *NnfStorageProfileLustreCmdLines, s apiconversion.Scope) error { return autoConvert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProfileLustreCmdLines(in, out, s) } + +func Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec(in *nnfv1alpha4.NnfSystemStorageSpec, out *NnfSystemStorageSpec, s apiconversion.Scope) error { + return autoConvert_v1alpha4_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec(in, out, s) +} diff --git a/api/v1alpha3/zz_generated.conversion.go b/api/v1alpha3/zz_generated.conversion.go index dd057153..3470fa2c 100644 --- a/api/v1alpha3/zz_generated.conversion.go +++ b/api/v1alpha3/zz_generated.conversion.go @@ -843,11 +843,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfSystemStorageSpec)(nil), (*NnfSystemStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec(a.(*v1alpha4.NnfSystemStorageSpec), b.(*NnfSystemStorageSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*NnfSystemStorageStatus)(nil), (*v1alpha4.NnfSystemStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus(a.(*NnfSystemStorageStatus), b.(*v1alpha4.NnfSystemStorageStatus), scope) }); err != nil { @@ -873,6 +868,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := 
s.AddConversionFunc((*v1alpha4.NnfSystemStorageSpec)(nil), (*NnfSystemStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec(a.(*v1alpha4.NnfSystemStorageSpec), b.(*NnfSystemStorageSpec), scope) + }); err != nil { + return err + } return nil } @@ -3127,7 +3127,17 @@ func Convert_v1alpha4_NnfSystemStorage_To_v1alpha3_NnfSystemStorage(in *v1alpha4 func autoConvert_v1alpha3_NnfSystemStorageList_To_v1alpha4_NnfSystemStorageList(in *NnfSystemStorageList, out *v1alpha4.NnfSystemStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha4.NnfSystemStorage)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1alpha4.NnfSystemStorage, len(*in)) + for i := range *in { + if err := Convert_v1alpha3_NnfSystemStorage_To_v1alpha4_NnfSystemStorage(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -3138,7 +3148,17 @@ func Convert_v1alpha3_NnfSystemStorageList_To_v1alpha4_NnfSystemStorageList(in * func autoConvert_v1alpha4_NnfSystemStorageList_To_v1alpha3_NnfSystemStorageList(in *v1alpha4.NnfSystemStorageList, out *NnfSystemStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]NnfSystemStorage)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfSystemStorage, len(*in)) + for i := range *in { + if err := Convert_v1alpha4_NnfSystemStorage_To_v1alpha3_NnfSystemStorage(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -3180,17 +3200,13 @@ func autoConvert_v1alpha4_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec( out.ComputesPattern = *(*[]int)(unsafe.Pointer(&in.ComputesPattern)) out.Capacity = in.Capacity out.Type = in.Type + // WARNING: in.Shared requires manual conversion: does not exist in 
peer-type out.StorageProfile = in.StorageProfile out.MakeClientMounts = in.MakeClientMounts out.ClientMountPath = in.ClientMountPath return nil } -// Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec is an autogenerated conversion function. -func Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec(in *v1alpha4.NnfSystemStorageSpec, out *NnfSystemStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec(in, out, s) -} - func autoConvert_v1alpha3_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus(in *NnfSystemStorageStatus, out *v1alpha4.NnfSystemStorageStatus, s conversion.Scope) error { out.Ready = in.Ready out.ResourceError = in.ResourceError diff --git a/api/v1alpha4/nnfsystemstorage_types.go b/api/v1alpha4/nnfsystemstorage_types.go index 7ee4ed9a..2e29de8b 100644 --- a/api/v1alpha4/nnfsystemstorage_types.go +++ b/api/v1alpha4/nnfsystemstorage_types.go @@ -83,6 +83,11 @@ type NnfSystemStorageSpec struct { // +kubebuilder:default:=raw Type string `json:"type,omitempty"` + // Shared will create one allocation per Rabbit rather than one allocation + // per compute node. + // +kubebuilder:default:=true + Shared bool `json:"shared"` + // StorageProfile is an object reference to the storage profile to use StorageProfile corev1.ObjectReference `json:"storageProfile"` diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfsystemstorages.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfsystemstorages.yaml index 093f1dc5..4dc25e43 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfsystemstorages.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfsystemstorages.yaml @@ -819,6 +819,12 @@ spec: MakeClientMounts specifies whether to make ClientMount resources or just make the devices available to the client type: boolean + shared: + default: true + description: |- + Shared will create one allocation per Rabbit rather than one allocation + per compute node. 
+ type: boolean storageProfile: description: StorageProfile is an object reference to the storage profile to use @@ -921,6 +927,7 @@ spec: required: - capacity - makeClientMounts + - shared - storageProfile type: object status: diff --git a/internal/controller/nnfsystemstorage_controller.go b/internal/controller/nnfsystemstorage_controller.go index 8587d618..30d95f4c 100644 --- a/internal/controller/nnfsystemstorage_controller.go +++ b/internal/controller/nnfsystemstorage_controller.go @@ -291,6 +291,19 @@ func (r *NnfSystemStorageReconciler) createServers(ctx context.Context, nnfSyste rabbitList = tempRabbitList } + allocationCount := 1 + if nnfSystemStorage.Spec.Shared == false { + switch nnfSystemStorage.Spec.ComputesTarget { + case nnfv1alpha4.ComputesTargetAll: + allocationCount = 16 + case nnfv1alpha4.ComputesTargetEven, nnfv1alpha4.ComputesTargetOdd: + allocationCount = 8 + case nnfv1alpha4.ComputesTargetPattern: + allocationCount = len(nnfSystemStorage.Spec.ComputesPattern) + default: + return dwsv1alpha2.NewResourceError("unexpected ComputesTarget type '%s'", nnfSystemStorage.Spec.ComputesTarget).WithFatal() + } + } // Use the Rabbit list to fill in the servers resource with one allocation per Rabbit servers := &dwsv1alpha2.Servers{ ObjectMeta: metav1.ObjectMeta{ @@ -312,7 +325,7 @@ func (r *NnfSystemStorageReconciler) createServers(ctx context.Context, nnfSyste servers.Spec.AllocationSets[0].Storage = []dwsv1alpha2.ServersSpecStorage{} for _, rabbitName := range rabbitList { - servers.Spec.AllocationSets[0].Storage = append(servers.Spec.AllocationSets[0].Storage, dwsv1alpha2.ServersSpecStorage{Name: rabbitName, AllocationCount: 1}) + servers.Spec.AllocationSets[0].Storage = append(servers.Spec.AllocationSets[0].Storage, dwsv1alpha2.ServersSpecStorage{Name: rabbitName, AllocationCount: allocationCount}) } return ctrl.SetControllerReference(nnfSystemStorage, servers, r.Scheme) @@ -384,7 +397,7 @@ func (r *NnfSystemStorageReconciler) createComputes(ctx 
context.Context, nnfSyst case nnfv1alpha4.ComputesTargetPattern: indexList = append([]int(nil), nnfSystemStorage.Spec.ComputesPattern...) default: - return dwsv1alpha2.NewResourceError("undexpected ComputesTarget type '%s'", nnfSystemStorage.Spec.ComputesTarget).WithFatal() + return dwsv1alpha2.NewResourceError("unexpected ComputesTarget type '%s'", nnfSystemStorage.Spec.ComputesTarget).WithFatal() } indexMap := map[int]bool{} @@ -592,7 +605,11 @@ func (r *NnfSystemStorageReconciler) createNnfAccess(ctx context.Context, nnfSys nnfAccess.Spec.DesiredState = "mounted" nnfAccess.Spec.UserID = 0 nnfAccess.Spec.GroupID = 0 - nnfAccess.Spec.Target = "shared" + if nnfSystemStorage.Spec.Shared { + nnfAccess.Spec.Target = "shared" + } else { + nnfAccess.Spec.Target = "single" + } nnfAccess.Spec.MakeClientMounts = nnfSystemStorage.Spec.MakeClientMounts nnfAccess.Spec.MountPath = nnfSystemStorage.Spec.ClientMountPath nnfAccess.Spec.ClientReference = corev1.ObjectReference{ diff --git a/internal/controller/nnfsystemstorage_controller_test.go b/internal/controller/nnfsystemstorage_controller_test.go index 65a663a1..4f145c9c 100644 --- a/internal/controller/nnfsystemstorage_controller_test.go +++ b/internal/controller/nnfsystemstorage_controller_test.go @@ -304,6 +304,7 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { Type: "raw", ComputesTarget: nnfv1alpha4.ComputesTargetAll, MakeClientMounts: false, + Shared: true, Capacity: 1073741824, StorageProfile: corev1.ObjectReference{ Name: storageProfile.GetName(), @@ -363,6 +364,7 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { Type: "raw", ComputesTarget: nnfv1alpha4.ComputesTargetEven, MakeClientMounts: false, + Shared: true, Capacity: 1073741824, StorageProfile: corev1.ObjectReference{ Name: storageProfile.GetName(), @@ -423,6 +425,7 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { ComputesTarget: nnfv1alpha4.ComputesTargetPattern, ComputesPattern: []int{0, 1, 2, 3, 4}, 
MakeClientMounts: false, + Shared: true, Capacity: 1073741824, StorageProfile: corev1.ObjectReference{ Name: storageProfile.GetName(), @@ -484,6 +487,7 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { ExcludeRabbits: []string{nodeNames[0]}, ExcludeComputes: []string{"1-4", "1-5", "1-6"}, MakeClientMounts: false, + Shared: true, Capacity: 1073741824, StorageProfile: corev1.ObjectReference{ Name: storageProfile.GetName(), From 35b9db119580d697063ad146e189c17b06436917 Mon Sep 17 00:00:00 2001 From: Dean Roehrich Date: Wed, 20 Nov 2024 16:14:11 -0600 Subject: [PATCH 12/23] Remove v1alpha1 API (#419) Signed-off-by: Dean Roehrich --- Makefile | 2 +- api/v1alpha1/conversion.go | 651 - api/v1alpha1/conversion_test.go | 107 - api/v1alpha1/doc.go | 23 - api/v1alpha1/groupversion_info.go | 42 - api/v1alpha1/nnf_resource_condition_types.go | 115 - api/v1alpha1/nnf_resource_health_type.go | 68 - api/v1alpha1/nnf_resource_state_type.go | 48 - api/v1alpha1/nnf_resource_status_type.go | 152 - api/v1alpha1/nnf_resource_type.go | 33 - api/v1alpha1/nnfaccess_types.go | 131 - api/v1alpha1/nnfcontainerprofile_types.go | 140 - api/v1alpha1/nnfdatamovement_types.go | 289 - api/v1alpha1/nnfdatamovementmanager_types.go | 106 - api/v1alpha1/nnfdatamovementprofile_types.go | 123 - api/v1alpha1/nnflustremgt_types.go | 108 - api/v1alpha1/nnfnode_types.go | 132 - api/v1alpha1/nnfnodeblockstorage_types.go | 137 - api/v1alpha1/nnfnodeecdata_types.go | 69 - api/v1alpha1/nnfnodestorage_types.go | 154 - api/v1alpha1/nnfportmanager_types.go | 142 - api/v1alpha1/nnfstorage_types.go | 183 - api/v1alpha1/nnfstorageprofile_types.go | 310 - api/v1alpha1/nnfsystemstorage_types.go | 137 - api/v1alpha1/workflow_helpers.go | 73 - api/v1alpha1/zz_generated.conversion.go | 3230 ---- api/v1alpha1/zz_generated.deepcopy.go | 2022 --- cmd/main.go | 3 - .../bases/nnf.cray.hpe.com_nnfaccesses.yaml | 250 - ...nnf.cray.hpe.com_nnfcontainerprofiles.yaml | 14845 ---------------- 
....cray.hpe.com_nnfdatamovementmanagers.yaml | 7378 -------- ....cray.hpe.com_nnfdatamovementprofiles.yaml | 126 - .../nnf.cray.hpe.com_nnfdatamovements.yaml | 406 - .../bases/nnf.cray.hpe.com_nnflustremgts.yaml | 271 - ...nnf.cray.hpe.com_nnfnodeblockstorages.yaml | 163 - .../bases/nnf.cray.hpe.com_nnfnodeecdata.yaml | 40 - .../crd/bases/nnf.cray.hpe.com_nnfnodes.yaml | 160 - .../nnf.cray.hpe.com_nnfnodestorages.yaml | 219 - .../nnf.cray.hpe.com_nnfportmanagers.yaml | 237 - .../nnf.cray.hpe.com_nnfstorageprofiles.yaml | 581 - .../bases/nnf.cray.hpe.com_nnfstorages.yaml | 295 - .../nnf.cray.hpe.com_nnfsystemstorages.yaml | 234 - internal/controller/conversion_test.go | 85 - internal/controller/suite_test.go | 4 - 44 files changed, 1 insertion(+), 34023 deletions(-) delete mode 100644 api/v1alpha1/conversion.go delete mode 100644 api/v1alpha1/conversion_test.go delete mode 100644 api/v1alpha1/doc.go delete mode 100644 api/v1alpha1/groupversion_info.go delete mode 100644 api/v1alpha1/nnf_resource_condition_types.go delete mode 100644 api/v1alpha1/nnf_resource_health_type.go delete mode 100644 api/v1alpha1/nnf_resource_state_type.go delete mode 100644 api/v1alpha1/nnf_resource_status_type.go delete mode 100644 api/v1alpha1/nnf_resource_type.go delete mode 100644 api/v1alpha1/nnfaccess_types.go delete mode 100644 api/v1alpha1/nnfcontainerprofile_types.go delete mode 100644 api/v1alpha1/nnfdatamovement_types.go delete mode 100644 api/v1alpha1/nnfdatamovementmanager_types.go delete mode 100644 api/v1alpha1/nnfdatamovementprofile_types.go delete mode 100644 api/v1alpha1/nnflustremgt_types.go delete mode 100644 api/v1alpha1/nnfnode_types.go delete mode 100644 api/v1alpha1/nnfnodeblockstorage_types.go delete mode 100644 api/v1alpha1/nnfnodeecdata_types.go delete mode 100644 api/v1alpha1/nnfnodestorage_types.go delete mode 100644 api/v1alpha1/nnfportmanager_types.go delete mode 100644 api/v1alpha1/nnfstorage_types.go delete mode 100644 
api/v1alpha1/nnfstorageprofile_types.go delete mode 100644 api/v1alpha1/nnfsystemstorage_types.go delete mode 100644 api/v1alpha1/workflow_helpers.go delete mode 100644 api/v1alpha1/zz_generated.conversion.go delete mode 100644 api/v1alpha1/zz_generated.deepcopy.go diff --git a/Makefile b/Makefile index c404412f..7cd944ef 100644 --- a/Makefile +++ b/Makefile @@ -407,7 +407,7 @@ $(CONVERSION_GEN): $(LOCALBIN) # Build conversion-gen from tools folder. # The SRC_DIRS value is a space-separated list of paths to old versions. # The --input-dirs value is a single path item; specify multiple --input-dirs # parameters if you have multiple old versions. -SRC_DIRS=./api/v1alpha1 ./api/v1alpha2 ./api/v1alpha3 +SRC_DIRS=./api/v1alpha2 ./api/v1alpha3 generate-go-conversions: $(CONVERSION_GEN) ## Generate conversions go code $(MAKE) clean-generated-conversions SRC_DIRS="$(SRC_DIRS)" $(CONVERSION_GEN) \ diff --git a/api/v1alpha1/conversion.go b/api/v1alpha1/conversion.go deleted file mode 100644 index 7ccca516..00000000 --- a/api/v1alpha1/conversion.go +++ /dev/null @@ -1,651 +0,0 @@ -/* - * Copyright 2024 Hewlett Packard Enterprise Development LP - * Other additional copyright holders may be indicated within. - * - * The entirety of this work is licensed under the Apache License, - * Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. - * - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package v1alpha1 - -import ( - apierrors "k8s.io/apimachinery/pkg/api/errors" - apiconversion "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/conversion" - logf "sigs.k8s.io/controller-runtime/pkg/log" - - nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" - utilconversion "github.com/NearNodeFlash/nnf-sos/github/cluster-api/util/conversion" -) - -var convertlog = logf.Log.V(2).WithName("convert-v1alpha1") - -func (src *NnfAccess) ConvertTo(dstRaw conversion.Hub) error { - convertlog.Info("Convert NnfAccess To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha4.NnfAccess) - - if err := Convert_v1alpha1_NnfAccess_To_v1alpha4_NnfAccess(src, dst, nil); err != nil { - return err - } - - // Manually restore data. - restored := &nnfv1alpha4.NnfAccess{} - if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { - return err - } - // EDIT THIS FUNCTION! If the annotation is holding anything that is - // hub-specific then copy it into 'dst' from 'restored'. - // Otherwise, you may comment out UnmarshalData() until it's needed. - - return nil -} - -func (dst *NnfAccess) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha4.NnfAccess) - convertlog.Info("Convert NnfAccess From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - - if err := Convert_v1alpha4_NnfAccess_To_v1alpha1_NnfAccess(src, dst, nil); err != nil { - return err - } - - // Preserve Hub data on down-conversion except for metadata. 
- return utilconversion.MarshalData(src, dst) -} - -func (src *NnfContainerProfile) ConvertTo(dstRaw conversion.Hub) error { - convertlog.Info("Convert NnfContainerProfile To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha4.NnfContainerProfile) - - if err := Convert_v1alpha1_NnfContainerProfile_To_v1alpha4_NnfContainerProfile(src, dst, nil); err != nil { - return err - } - - // Manually restore data. - restored := &nnfv1alpha4.NnfContainerProfile{} - if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { - return err - } - // EDIT THIS FUNCTION! If the annotation is holding anything that is - // hub-specific then copy it into 'dst' from 'restored'. - // Otherwise, you may comment out UnmarshalData() until it's needed. - - return nil -} - -func (dst *NnfContainerProfile) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha4.NnfContainerProfile) - convertlog.Info("Convert NnfContainerProfile From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - - if err := Convert_v1alpha4_NnfContainerProfile_To_v1alpha1_NnfContainerProfile(src, dst, nil); err != nil { - return err - } - - // Preserve Hub data on down-conversion except for metadata. - return utilconversion.MarshalData(src, dst) -} - -func (src *NnfDataMovement) ConvertTo(dstRaw conversion.Hub) error { - convertlog.Info("Convert NnfDataMovement To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha4.NnfDataMovement) - - if err := Convert_v1alpha1_NnfDataMovement_To_v1alpha4_NnfDataMovement(src, dst, nil); err != nil { - return err - } - - // Manually restore data. - restored := &nnfv1alpha4.NnfDataMovement{} - if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { - return err - } - // EDIT THIS FUNCTION! If the annotation is holding anything that is - // hub-specific then copy it into 'dst' from 'restored'. 
- // Otherwise, you may comment out UnmarshalData() until it's needed. - - return nil -} - -func (dst *NnfDataMovement) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha4.NnfDataMovement) - convertlog.Info("Convert NnfDataMovement From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - - if err := Convert_v1alpha4_NnfDataMovement_To_v1alpha1_NnfDataMovement(src, dst, nil); err != nil { - return err - } - - // Preserve Hub data on down-conversion except for metadata. - return utilconversion.MarshalData(src, dst) -} - -func (src *NnfDataMovementManager) ConvertTo(dstRaw conversion.Hub) error { - convertlog.Info("Convert NnfDataMovementManager To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha4.NnfDataMovementManager) - - if err := Convert_v1alpha1_NnfDataMovementManager_To_v1alpha4_NnfDataMovementManager(src, dst, nil); err != nil { - return err - } - - // Manually restore data. - restored := &nnfv1alpha4.NnfDataMovementManager{} - if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { - return err - } - // EDIT THIS FUNCTION! If the annotation is holding anything that is - // hub-specific then copy it into 'dst' from 'restored'. - // Otherwise, you may comment out UnmarshalData() until it's needed. - - return nil -} - -func (dst *NnfDataMovementManager) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha4.NnfDataMovementManager) - convertlog.Info("Convert NnfDataMovementManager From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - - if err := Convert_v1alpha4_NnfDataMovementManager_To_v1alpha1_NnfDataMovementManager(src, dst, nil); err != nil { - return err - } - - // Preserve Hub data on down-conversion except for metadata. 
- return utilconversion.MarshalData(src, dst) -} - -func (src *NnfDataMovementProfile) ConvertTo(dstRaw conversion.Hub) error { - convertlog.Info("Convert NnfDataMovementProfile To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha4.NnfDataMovementProfile) - - if err := Convert_v1alpha1_NnfDataMovementProfile_To_v1alpha4_NnfDataMovementProfile(src, dst, nil); err != nil { - return err - } - - // Manually restore data. - restored := &nnfv1alpha4.NnfDataMovementProfile{} - if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { - return err - } - // EDIT THIS FUNCTION! If the annotation is holding anything that is - // hub-specific then copy it into 'dst' from 'restored'. - // Otherwise, you may comment out UnmarshalData() until it's needed. - - return nil -} - -func (dst *NnfDataMovementProfile) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha4.NnfDataMovementProfile) - convertlog.Info("Convert NnfDataMovementProfile From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - - if err := Convert_v1alpha4_NnfDataMovementProfile_To_v1alpha1_NnfDataMovementProfile(src, dst, nil); err != nil { - return err - } - - // Preserve Hub data on down-conversion except for metadata. - return utilconversion.MarshalData(src, dst) -} - -func (src *NnfLustreMGT) ConvertTo(dstRaw conversion.Hub) error { - convertlog.Info("Convert NnfLustreMGT To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha4.NnfLustreMGT) - - if err := Convert_v1alpha1_NnfLustreMGT_To_v1alpha4_NnfLustreMGT(src, dst, nil); err != nil { - return err - } - - // Manually restore data. - restored := &nnfv1alpha4.NnfLustreMGT{} - if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { - return err - } - // EDIT THIS FUNCTION! If the annotation is holding anything that is - // hub-specific then copy it into 'dst' from 'restored'. 
- // Otherwise, you may comment out UnmarshalData() until it's needed. - - return nil -} - -func (dst *NnfLustreMGT) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha4.NnfLustreMGT) - convertlog.Info("Convert NnfLustreMGT From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - - if err := Convert_v1alpha4_NnfLustreMGT_To_v1alpha1_NnfLustreMGT(src, dst, nil); err != nil { - return err - } - - // Preserve Hub data on down-conversion except for metadata. - return utilconversion.MarshalData(src, dst) -} - -func (src *NnfNode) ConvertTo(dstRaw conversion.Hub) error { - convertlog.Info("Convert NnfNode To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha4.NnfNode) - - if err := Convert_v1alpha1_NnfNode_To_v1alpha4_NnfNode(src, dst, nil); err != nil { - return err - } - - // Manually restore data. - restored := &nnfv1alpha4.NnfNode{} - if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { - return err - } - // EDIT THIS FUNCTION! If the annotation is holding anything that is - // hub-specific then copy it into 'dst' from 'restored'. - // Otherwise, you may comment out UnmarshalData() until it's needed. - - return nil -} - -func (dst *NnfNode) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha4.NnfNode) - convertlog.Info("Convert NnfNode From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - - if err := Convert_v1alpha4_NnfNode_To_v1alpha1_NnfNode(src, dst, nil); err != nil { - return err - } - - // Preserve Hub data on down-conversion except for metadata. 
- return utilconversion.MarshalData(src, dst) -} - -func (src *NnfNodeBlockStorage) ConvertTo(dstRaw conversion.Hub) error { - convertlog.Info("Convert NnfNodeBlockStorage To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha4.NnfNodeBlockStorage) - - if err := Convert_v1alpha1_NnfNodeBlockStorage_To_v1alpha4_NnfNodeBlockStorage(src, dst, nil); err != nil { - return err - } - - // Manually restore data. - restored := &nnfv1alpha4.NnfNodeBlockStorage{} - if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { - return err - } - // EDIT THIS FUNCTION! If the annotation is holding anything that is - // hub-specific then copy it into 'dst' from 'restored'. - // Otherwise, you may comment out UnmarshalData() until it's needed. - - return nil -} - -func (dst *NnfNodeBlockStorage) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha4.NnfNodeBlockStorage) - convertlog.Info("Convert NnfNodeBlockStorage From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - - if err := Convert_v1alpha4_NnfNodeBlockStorage_To_v1alpha1_NnfNodeBlockStorage(src, dst, nil); err != nil { - return err - } - - // Preserve Hub data on down-conversion except for metadata. - return utilconversion.MarshalData(src, dst) -} - -func (src *NnfNodeECData) ConvertTo(dstRaw conversion.Hub) error { - convertlog.Info("Convert NnfNodeECData To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha4.NnfNodeECData) - - if err := Convert_v1alpha1_NnfNodeECData_To_v1alpha4_NnfNodeECData(src, dst, nil); err != nil { - return err - } - - // Manually restore data. - restored := &nnfv1alpha4.NnfNodeECData{} - if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { - return err - } - // EDIT THIS FUNCTION! If the annotation is holding anything that is - // hub-specific then copy it into 'dst' from 'restored'. 
- // Otherwise, you may comment out UnmarshalData() until it's needed. - - return nil -} - -func (dst *NnfNodeECData) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha4.NnfNodeECData) - convertlog.Info("Convert NnfNodeECData From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - - if err := Convert_v1alpha4_NnfNodeECData_To_v1alpha1_NnfNodeECData(src, dst, nil); err != nil { - return err - } - - // Preserve Hub data on down-conversion except for metadata. - return utilconversion.MarshalData(src, dst) -} - -func (src *NnfNodeStorage) ConvertTo(dstRaw conversion.Hub) error { - convertlog.Info("Convert NnfNodeStorage To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha4.NnfNodeStorage) - - if err := Convert_v1alpha1_NnfNodeStorage_To_v1alpha4_NnfNodeStorage(src, dst, nil); err != nil { - return err - } - - // Manually restore data. - restored := &nnfv1alpha4.NnfNodeStorage{} - if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { - return err - } - // EDIT THIS FUNCTION! If the annotation is holding anything that is - // hub-specific then copy it into 'dst' from 'restored'. - // Otherwise, you may comment out UnmarshalData() until it's needed. - - return nil -} - -func (dst *NnfNodeStorage) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha4.NnfNodeStorage) - convertlog.Info("Convert NnfNodeStorage From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - - if err := Convert_v1alpha4_NnfNodeStorage_To_v1alpha1_NnfNodeStorage(src, dst, nil); err != nil { - return err - } - - // Preserve Hub data on down-conversion except for metadata. 
- return utilconversion.MarshalData(src, dst) -} - -func (src *NnfPortManager) ConvertTo(dstRaw conversion.Hub) error { - convertlog.Info("Convert NnfPortManager To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha4.NnfPortManager) - - if err := Convert_v1alpha1_NnfPortManager_To_v1alpha4_NnfPortManager(src, dst, nil); err != nil { - return err - } - - // Manually restore data. - restored := &nnfv1alpha4.NnfPortManager{} - if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { - return err - } - // EDIT THIS FUNCTION! If the annotation is holding anything that is - // hub-specific then copy it into 'dst' from 'restored'. - // Otherwise, you may comment out UnmarshalData() until it's needed. - - return nil -} - -func (dst *NnfPortManager) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha4.NnfPortManager) - convertlog.Info("Convert NnfPortManager From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - - if err := Convert_v1alpha4_NnfPortManager_To_v1alpha1_NnfPortManager(src, dst, nil); err != nil { - return err - } - - // Preserve Hub data on down-conversion except for metadata. - return utilconversion.MarshalData(src, dst) -} - -func (src *NnfStorage) ConvertTo(dstRaw conversion.Hub) error { - convertlog.Info("Convert NnfStorage To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha4.NnfStorage) - - if err := Convert_v1alpha1_NnfStorage_To_v1alpha4_NnfStorage(src, dst, nil); err != nil { - return err - } - - // Manually restore data. - restored := &nnfv1alpha4.NnfStorage{} - if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { - return err - } - // EDIT THIS FUNCTION! If the annotation is holding anything that is - // hub-specific then copy it into 'dst' from 'restored'. - // Otherwise, you may comment out UnmarshalData() until it's needed. 
- - return nil -} - -func (dst *NnfStorage) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha4.NnfStorage) - convertlog.Info("Convert NnfStorage From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - - if err := Convert_v1alpha4_NnfStorage_To_v1alpha1_NnfStorage(src, dst, nil); err != nil { - return err - } - - // Preserve Hub data on down-conversion except for metadata. - return utilconversion.MarshalData(src, dst) -} - -func (src *NnfStorageProfile) ConvertTo(dstRaw conversion.Hub) error { - convertlog.Info("Convert NnfStorageProfile To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha4.NnfStorageProfile) - - if err := Convert_v1alpha1_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(src, dst, nil); err != nil { - return err - } - - // Manually restore data. - restored := &nnfv1alpha4.NnfStorageProfile{} - hasAnno, err := utilconversion.UnmarshalData(src, restored) - if err != nil { - return err - } - // EDIT THIS FUNCTION! If the annotation is holding anything that is - // hub-specific then copy it into 'dst' from 'restored'. - // Otherwise, you may comment out UnmarshalData() until it's needed. - - if hasAnno { - dst.Data.LustreStorage.MgtCmdLines.PostActivate = append([]string(nil), restored.Data.LustreStorage.MgtCmdLines.PostActivate...) - dst.Data.LustreStorage.MgtCmdLines.PreDeactivate = append([]string(nil), restored.Data.LustreStorage.MgtCmdLines.PreDeactivate...) - dst.Data.LustreStorage.MgtCmdLines.PostMount = append([]string(nil), restored.Data.LustreStorage.MgtCmdLines.PostMount...) - dst.Data.LustreStorage.MgtCmdLines.PreUnmount = append([]string(nil), restored.Data.LustreStorage.MgtCmdLines.PreUnmount...) - dst.Data.LustreStorage.MgtMdtCmdLines.PostActivate = append([]string(nil), restored.Data.LustreStorage.MgtMdtCmdLines.PostActivate...) 
- dst.Data.LustreStorage.MgtMdtCmdLines.PreDeactivate = append([]string(nil), restored.Data.LustreStorage.MgtMdtCmdLines.PreDeactivate...) - dst.Data.LustreStorage.MgtMdtCmdLines.PostMount = append([]string(nil), restored.Data.LustreStorage.MgtMdtCmdLines.PostMount...) - dst.Data.LustreStorage.MgtMdtCmdLines.PreUnmount = append([]string(nil), restored.Data.LustreStorage.MgtMdtCmdLines.PreUnmount...) - dst.Data.LustreStorage.MdtCmdLines.PostActivate = append([]string(nil), restored.Data.LustreStorage.MdtCmdLines.PostActivate...) - dst.Data.LustreStorage.MdtCmdLines.PreDeactivate = append([]string(nil), restored.Data.LustreStorage.MdtCmdLines.PreDeactivate...) - dst.Data.LustreStorage.MdtCmdLines.PostMount = append([]string(nil), restored.Data.LustreStorage.MdtCmdLines.PostMount...) - dst.Data.LustreStorage.MdtCmdLines.PreUnmount = append([]string(nil), restored.Data.LustreStorage.MdtCmdLines.PreUnmount...) - dst.Data.LustreStorage.OstCmdLines.PostActivate = append([]string(nil), restored.Data.LustreStorage.OstCmdLines.PostActivate...) - dst.Data.LustreStorage.OstCmdLines.PreDeactivate = append([]string(nil), restored.Data.LustreStorage.OstCmdLines.PreDeactivate...) - dst.Data.LustreStorage.OstCmdLines.PostMount = append([]string(nil), restored.Data.LustreStorage.OstCmdLines.PostMount...) - dst.Data.LustreStorage.OstCmdLines.PreUnmount = append([]string(nil), restored.Data.LustreStorage.OstCmdLines.PreUnmount...) - dst.Data.RawStorage.CmdLines.PostMount = append([]string(nil), restored.Data.RawStorage.CmdLines.PostMount...) - dst.Data.RawStorage.CmdLines.PreUnmount = append([]string(nil), restored.Data.RawStorage.CmdLines.PreUnmount...) - dst.Data.XFSStorage.CmdLines.PostMount = append([]string(nil), restored.Data.XFSStorage.CmdLines.PostMount...) - dst.Data.XFSStorage.CmdLines.PreUnmount = append([]string(nil), restored.Data.XFSStorage.CmdLines.PreUnmount...) 
- dst.Data.GFS2Storage.CmdLines.PostMount = append([]string(nil), restored.Data.GFS2Storage.CmdLines.PostMount...) - dst.Data.GFS2Storage.CmdLines.PreUnmount = append([]string(nil), restored.Data.GFS2Storage.CmdLines.PreUnmount...) - } - - return nil -} - -func (dst *NnfStorageProfile) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha4.NnfStorageProfile) - convertlog.Info("Convert NnfStorageProfile From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - - if err := Convert_v1alpha4_NnfStorageProfile_To_v1alpha1_NnfStorageProfile(src, dst, nil); err != nil { - return err - } - - // Preserve Hub data on down-conversion except for metadata. - return utilconversion.MarshalData(src, dst) -} - -func (src *NnfSystemStorage) ConvertTo(dstRaw conversion.Hub) error { - convertlog.Info("Convert NnfSystemStorage To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - dst := dstRaw.(*nnfv1alpha4.NnfSystemStorage) - - if err := Convert_v1alpha1_NnfSystemStorage_To_v1alpha4_NnfSystemStorage(src, dst, nil); err != nil { - return err - } - - // Manually restore data. - restored := &nnfv1alpha4.NnfSystemStorage{} - hasAnno, err := utilconversion.UnmarshalData(src, restored) - if err != nil { - return err - } - - // EDIT THIS FUNCTION! If the annotation is holding anything that is - // hub-specific then copy it into 'dst' from 'restored'. - // Otherwise, you may comment out UnmarshalData() until it's needed. 
- if hasAnno { - dst.Spec.ExcludeDisabledRabbits = restored.Spec.ExcludeDisabledRabbits - dst.Spec.Shared = restored.Spec.Shared - } else { - dst.Spec.ExcludeDisabledRabbits = false - dst.Spec.Shared = true - } - - return nil -} - -func (dst *NnfSystemStorage) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*nnfv1alpha4.NnfSystemStorage) - convertlog.Info("Convert NnfSystemStorage From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) - - if err := Convert_v1alpha4_NnfSystemStorage_To_v1alpha1_NnfSystemStorage(src, dst, nil); err != nil { - return err - } - - // Preserve Hub data on down-conversion except for metadata. - return utilconversion.MarshalData(src, dst) -} - -// The List-based ConvertTo/ConvertFrom routines are never used by the -// conversion webhook, but the conversion-verifier tool wants to see them. -// The conversion-gen tool generated the Convert_X_to_Y routines, should they -// ever be needed. - -func resource(resource string) schema.GroupResource { - return schema.GroupResource{Group: "nnf", Resource: resource} -} - -func (src *NnfAccessList) ConvertTo(dstRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfAccessList"), "ConvertTo") -} - -func (dst *NnfAccessList) ConvertFrom(srcRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfAccessList"), "ConvertFrom") -} - -func (src *NnfContainerProfileList) ConvertTo(dstRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfContainerProfileList"), "ConvertTo") -} - -func (dst *NnfContainerProfileList) ConvertFrom(srcRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfContainerProfileList"), "ConvertFrom") -} - -func (src *NnfDataMovementList) ConvertTo(dstRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfDataMovementList"), "ConvertTo") -} - -func (dst *NnfDataMovementList) ConvertFrom(srcRaw conversion.Hub) error { - return 
apierrors.NewMethodNotSupported(resource("NnfDataMovementList"), "ConvertFrom") -} - -func (src *NnfDataMovementManagerList) ConvertTo(dstRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfDataMovementManagerList"), "ConvertTo") -} - -func (dst *NnfDataMovementManagerList) ConvertFrom(srcRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfDataMovementManagerList"), "ConvertFrom") -} - -func (src *NnfDataMovementProfileList) ConvertTo(dstRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfDataMovementProfileList"), "ConvertTo") -} - -func (dst *NnfDataMovementProfileList) ConvertFrom(srcRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfDataMovementProfileList"), "ConvertFrom") -} - -func (src *NnfLustreMGTList) ConvertTo(dstRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfLustreMGTList"), "ConvertTo") -} - -func (dst *NnfLustreMGTList) ConvertFrom(srcRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfLustreMGTList"), "ConvertFrom") -} - -func (src *NnfNodeList) ConvertTo(dstRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfNodeList"), "ConvertTo") -} - -func (dst *NnfNodeList) ConvertFrom(srcRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfNodeList"), "ConvertFrom") -} - -func (src *NnfNodeBlockStorageList) ConvertTo(dstRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfNodeBlockStorageList"), "ConvertTo") -} - -func (dst *NnfNodeBlockStorageList) ConvertFrom(srcRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfNodeBlockStorageList"), "ConvertFrom") -} - -func (src *NnfNodeECDataList) ConvertTo(dstRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfNodeECDataList"), "ConvertTo") -} - -func (dst *NnfNodeECDataList) 
ConvertFrom(srcRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfNodeECDataList"), "ConvertFrom") -} - -func (src *NnfNodeStorageList) ConvertTo(dstRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfNodeStorageList"), "ConvertTo") -} - -func (dst *NnfNodeStorageList) ConvertFrom(srcRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfNodeStorageList"), "ConvertFrom") -} - -func (src *NnfPortManagerList) ConvertTo(dstRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfPortManagerList"), "ConvertTo") -} - -func (dst *NnfPortManagerList) ConvertFrom(srcRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfPortManagerList"), "ConvertFrom") -} - -func (src *NnfStorageList) ConvertTo(dstRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfStorageList"), "ConvertTo") -} - -func (dst *NnfStorageList) ConvertFrom(srcRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfStorageList"), "ConvertFrom") -} - -func (src *NnfStorageProfileList) ConvertTo(dstRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfStorageProfileList"), "ConvertTo") -} - -func (dst *NnfStorageProfileList) ConvertFrom(srcRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfStorageProfileList"), "ConvertFrom") -} - -func (src *NnfSystemStorageList) ConvertTo(dstRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfSystemStorageList"), "ConvertTo") -} - -func (dst *NnfSystemStorageList) ConvertFrom(srcRaw conversion.Hub) error { - return apierrors.NewMethodNotSupported(resource("NnfSystemStorageList"), "ConvertFrom") -} - -// The conversion-gen tool dropped these from zz_generated.conversion.go to -// force us to acknowledge that we are addressing the conversion requirements. 
-func Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha1_NnfSystemStorageSpec(in *nnfv1alpha4.NnfSystemStorageSpec, out *NnfSystemStorageSpec, s apiconversion.Scope) error { - return autoConvert_v1alpha4_NnfSystemStorageSpec_To_v1alpha1_NnfSystemStorageSpec(in, out, s) -} - -func Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(in *nnfv1alpha4.NnfStorageProfileCmdLines, out *NnfStorageProfileCmdLines, s apiconversion.Scope) error { - return autoConvert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(in, out, s) -} - -func Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(in *nnfv1alpha4.NnfStorageProfileLustreCmdLines, out *NnfStorageProfileLustreCmdLines, s apiconversion.Scope) error { - return autoConvert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(in, out, s) -} diff --git a/api/v1alpha1/conversion_test.go b/api/v1alpha1/conversion_test.go deleted file mode 100644 index bfc85ae7..00000000 --- a/api/v1alpha1/conversion_test.go +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright 2024 Hewlett Packard Enterprise Development LP - * Other additional copyright holders may be indicated within. - * - * The entirety of this work is licensed under the Apache License, - * Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. - * - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package v1alpha1 - -import ( - "testing" - - . 
"github.com/onsi/ginkgo/v2" - - nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" - utilconversion "github.com/NearNodeFlash/nnf-sos/github/cluster-api/util/conversion" -) - -func TestFuzzyConversion(t *testing.T) { - - t.Run("for NnfAccess", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha4.NnfAccess{}, - Spoke: &NnfAccess{}, - })) - - t.Run("for NnfContainerProfile", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha4.NnfContainerProfile{}, - Spoke: &NnfContainerProfile{}, - })) - - t.Run("for NnfDataMovement", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha4.NnfDataMovement{}, - Spoke: &NnfDataMovement{}, - })) - - t.Run("for NnfDataMovementManager", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha4.NnfDataMovementManager{}, - Spoke: &NnfDataMovementManager{}, - })) - - t.Run("for NnfDataMovementProfile", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha4.NnfDataMovementProfile{}, - Spoke: &NnfDataMovementProfile{}, - })) - - t.Run("for NnfLustreMGT", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha4.NnfLustreMGT{}, - Spoke: &NnfLustreMGT{}, - })) - - t.Run("for NnfNode", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha4.NnfNode{}, - Spoke: &NnfNode{}, - })) - - t.Run("for NnfNodeBlockStorage", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha4.NnfNodeBlockStorage{}, - Spoke: &NnfNodeBlockStorage{}, - })) - - t.Run("for NnfNodeECData", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha4.NnfNodeECData{}, - Spoke: &NnfNodeECData{}, - })) - - t.Run("for NnfNodeStorage", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha4.NnfNodeStorage{}, - Spoke: &NnfNodeStorage{}, - })) - - t.Run("for NnfPortManager", 
utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha4.NnfPortManager{}, - Spoke: &NnfPortManager{}, - })) - - t.Run("for NnfStorage", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha4.NnfStorage{}, - Spoke: &NnfStorage{}, - })) - - t.Run("for NnfStorageProfile", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha4.NnfStorageProfile{}, - Spoke: &NnfStorageProfile{}, - })) - - t.Run("for NnfSystemStorage", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &nnfv1alpha4.NnfSystemStorage{}, - Spoke: &NnfSystemStorage{}, - })) - -} - -// Just touch ginkgo, so it's here to interpret any ginkgo args from -// "make test", so that doesn't fail on this test file. -var _ = BeforeSuite(func() {}) diff --git a/api/v1alpha1/doc.go b/api/v1alpha1/doc.go deleted file mode 100644 index b22c3173..00000000 --- a/api/v1alpha1/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2024 Hewlett Packard Enterprise Development LP - * Other additional copyright holders may be indicated within. - * - * The entirety of this work is licensed under the Apache License, - * Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. - * - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// The following tag tells conversion-gen to generate conversion routines, and -// it tells conversion-gen the name of the hub version. 
-// +k8s:conversion-gen=github.com/NearNodeFlash/nnf-sos/api/v1alpha4 -package v1alpha1 diff --git a/api/v1alpha1/groupversion_info.go b/api/v1alpha1/groupversion_info.go deleted file mode 100644 index 6ab8487d..00000000 --- a/api/v1alpha1/groupversion_info.go +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2021-2024 Hewlett Packard Enterprise Development LP - * Other additional copyright holders may be indicated within. - * - * The entirety of this work is licensed under the Apache License, - * Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. - * - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package v1alpha1 contains API Schema definitions for the nnf v1alpha1 API group -// +kubebuilder:object:generate=true -// +groupName=nnf.cray.hpe.com -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" -) - -var ( - // GroupVersion is group version used to register these objects - GroupVersion = schema.GroupVersion{Group: "nnf.cray.hpe.com", Version: "v1alpha1"} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} - - // AddToScheme adds the types in this group-version to the given scheme. - AddToScheme = SchemeBuilder.AddToScheme - - // Used by zz_generated.conversion.go. 
- localSchemeBuilder = SchemeBuilder.SchemeBuilder -) diff --git a/api/v1alpha1/nnf_resource_condition_types.go b/api/v1alpha1/nnf_resource_condition_types.go deleted file mode 100644 index ad0da390..00000000 --- a/api/v1alpha1/nnf_resource_condition_types.go +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Copyright 2021-2024 Hewlett Packard Enterprise Development LP - * Other additional copyright holders may be indicated within. - * - * The entirety of this work is licensed under the Apache License, - * Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. - * - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Types define the condition type that is recorded by the system. Each storage resource -// defines an array of conditions as state transitions. Entry into and out of the state -// is recorded by the metav1.ConditionStatus. Order must be preserved and consistent between -// the Index and string values. 
-const ( - ConditionIndexCreateStoragePool = iota - ConditionIndexDeleteStoragePool - ConditionIndexCreateStorageGroup - ConditionIndexCreateFileSystem - ConditionIndexCreateFileShare - ConditionIndexGetResource - ConditionIndexInvalidResource - // INSERT NEW ITEMS HERE - Ensure Condition string is at same index - - numConditions - - ConditionCreateStoragePool = "CreateStoragePool" - ConditionDeleteStoragePool = "DeleteStoragePool" - ConditionCreateStorageGroup = "CreateStorageGroup" - ConditionCreateFileSystem = "CreateFileSystem" - ConditionCreateFileShare = "CreateFileShare" - ConditionGetResource = "GetResource" - ConditionInvalidResource = "InvalidResource" - // INSERT NEW ITEMS HERE - Ensure NewConditions() is updated to contain item and correct ordering -) - -// NewConditions generates a new conditions array for NNFNodeStorage -func NewConditions() []metav1.Condition { - - types := []string{ - ConditionCreateStoragePool, - ConditionDeleteStoragePool, - ConditionCreateStorageGroup, - ConditionCreateFileSystem, - ConditionCreateFileShare, - ConditionGetResource, - ConditionInvalidResource, - } - - if numConditions != len(types) { - panic("Did you forget to include the condition in the types array?") - } - - c := make([]metav1.Condition, len(types)) - for idx := range c { - c[idx] = metav1.Condition{ - Type: types[idx], - Status: metav1.ConditionUnknown, - Reason: ConditionUnknown, - LastTransitionTime: metav1.Now(), - } - } - - c[ConditionIndexCreateStoragePool].Status = metav1.ConditionTrue - c[ConditionIndexCreateStoragePool].LastTransitionTime = metav1.Now() - - return c - -} - -// SetGetResourceFailureCondition sets/gets the specified condition to failed -func SetGetResourceFailureCondition(c []metav1.Condition, err error) { - c[ConditionIndexGetResource] = metav1.Condition{ - Type: ConditionGetResource, - Reason: ConditionFailed, - Status: metav1.ConditionTrue, - Message: err.Error(), - LastTransitionTime: metav1.Now(), - } -} - -// 
SetResourceInvalidCondition sets/gets the specified condition to invalid -func SetResourceInvalidCondition(c []metav1.Condition, err error) { - c[ConditionIndexInvalidResource] = metav1.Condition{ - Type: ConditionInvalidResource, - Reason: ConditionInvalid, - Status: metav1.ConditionTrue, - Message: err.Error(), - LastTransitionTime: metav1.Now(), - } -} - -// Reason implements the Reason field of a metav1.Condition. In accordance with the metav1.Condition, -// the value should be a CamelCase string and may not be empty. -const ( - ConditionUnknown = "Unknown" - ConditionFailed = "Failed" - ConditionInvalid = "Invalid" - ConditionSuccess = "Success" -) diff --git a/api/v1alpha1/nnf_resource_health_type.go b/api/v1alpha1/nnf_resource_health_type.go deleted file mode 100644 index 78e90c3a..00000000 --- a/api/v1alpha1/nnf_resource_health_type.go +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2021, 2022 Hewlett Packard Enterprise Development LP - * Other additional copyright holders may be indicated within. - * - * The entirety of this work is licensed under the Apache License, - * Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. - * - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package v1alpha1 - -import ( - sf "github.com/NearNodeFlash/nnf-ec/pkg/rfsf/pkg/models" -) - -// NnfResourceHealthType defines the health of an NNF resource. 
-type NnfResourceHealthType string - -const ( - // ResourceOkay is SF health OK - ResourceOkay NnfResourceHealthType = NnfResourceHealthType(sf.OK_RH) - - // ResourceWarning is SF health WARNING - ResourceWarning = NnfResourceHealthType(sf.WARNING_RH) - - // ResourceCritical is SF health CRITICAL - ResourceCritical = NnfResourceHealthType(sf.CRITICAL_RH) -) - -// ResourceHealth maps a SF ResourceStatus to an NNFResourceHealthType -func ResourceHealth(s sf.ResourceStatus) NnfResourceHealthType { - switch s.Health { - case sf.OK_RH: - return ResourceOkay - case sf.WARNING_RH: - return ResourceWarning - case sf.CRITICAL_RH: - return ResourceCritical - } - - panic("Unknown Resource Health " + string(s.Health)) -} - -// UpdateIfWorseThan examines the input health type and update the health if it is worse -// than the stored value -func (rht NnfResourceHealthType) UpdateIfWorseThan(health *NnfResourceHealthType) { - switch rht { - case ResourceWarning: - if *health == ResourceOkay { - *health = ResourceWarning - } - case ResourceCritical: - if *health != ResourceCritical { - *health = ResourceCritical - } - default: - } -} diff --git a/api/v1alpha1/nnf_resource_state_type.go b/api/v1alpha1/nnf_resource_state_type.go deleted file mode 100644 index 9a2fe504..00000000 --- a/api/v1alpha1/nnf_resource_state_type.go +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2021, 2022 Hewlett Packard Enterprise Development LP - * Other additional copyright holders may be indicated within. - * - * The entirety of this work is licensed under the Apache License, - * Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. - * - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package v1alpha1 - -// NnfResourceStateType defines valid states that a user can configure an NNF resource -type NnfResourceStateType string - -const ( - // - // Below reflects the current status of a static resource - // - - // ResourceEnable means this static NNF resource should be enabled. - ResourceEnable NnfResourceStateType = "Enable" - - // ResourceDisable means this static NNF resource should be disabled. Not all static resources can be disabled. - ResourceDisable = "Disable" - - // - // Below reflects the current status of a managed (user created) resource - // - - // ResourceCreate means the resource should be created and enabled for operation. For a newly - // created resource, the default state is create. - ResourceCreate NnfResourceStateType = "Create" - - // ResourceDestroy means the resource should be released from the allocated resource pool, and - // this resource and all child resources will be released to the free resource pools - // managed by the system. - ResourceDestroy = "Destroy" -) diff --git a/api/v1alpha1/nnf_resource_status_type.go b/api/v1alpha1/nnf_resource_status_type.go deleted file mode 100644 index 050f2ca1..00000000 --- a/api/v1alpha1/nnf_resource_status_type.go +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Copyright 2021-2023 Hewlett Packard Enterprise Development LP - * Other additional copyright holders may be indicated within. - * - * The entirety of this work is licensed under the Apache License, - * Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. - * - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package v1alpha1 - -import ( - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - - sf "github.com/NearNodeFlash/nnf-ec/pkg/rfsf/pkg/models" -) - -// NnfResourceStatusType is the string that indicates the resource's status -type NnfResourceStatusType string - -const ( - // - // Below reflects the current status of a static resource - // - - // ResourceEnabled means the static NNF resource is enabled and ready to fullfil requests for - // managed resources. - ResourceEnabled NnfResourceStatusType = NnfResourceStatusType(sf.ENABLED_RST) - - // ResourceDisabled means the static NNF resource is present but disabled and not available for use - ResourceDisabled = NnfResourceStatusType(sf.DISABLED_RST) - - // ResourceNotPresent means the static NNF resource is not found; likely because it is disconnected - // or in a powered down state. - ResourceNotPresent = "NotPresent" - - // ResourceOffline means the static NNF resource is offline and the NNF Node cannot communicate with - // the resource. This differs from a NotPresent status in that the device is known to exist. - ResourceOffline = "Offline" - - // - // Below reflects the current status of a managed (user created) resource - // - - // ResourceStarting means the NNF resource is currently in the process of starting - resources - // are being prepared for transition to an Active state. - ResourceStarting = NnfResourceStatusType(sf.STARTING_RST) - - // ResourceDeleting means the NNF resource is currently in the process of being deleted - the resource - // and all child resources are being returned to the NNF node's free resources. Upon a successful - // deletion, the resource will be removed from the list of managed NNF resources - ResourceDeleting = "Deleting" - - // ResourceDeleted means the NNF resource was deleted. 
This reflects the state where the NNF resource does - // not exist in the NNF space, but the resource might still exist in Kubernetes. A resource in - // this state suggests that Kubernetes is unable to delete the object. - ResourceDeleted = "Deleted" - - // ResourceReady means the NNF resource is ready for use. - ResourceReady = "Ready" - - // ResourceFailed means the NNF resource has failed during startup or execution. A failed state is - // an unrecoverable condition. Additional information about the Failed cause can be found by - // looking at the owning resource's Conditions field. A failed resource can only be removed - // by transition to a Delete state. - ResourceFailed = "Failed" - - // ResourceInvalid means the NNF resource configuration is invalid due to an improper format or arrangement - // of listed resource parameters. - ResourceInvalid = "Invalid" -) - -// UpdateIfWorseThan updates the stored status of the resource if the new status is worse than what was stored -func (rst NnfResourceStatusType) UpdateIfWorseThan(status *NnfResourceStatusType) { - switch rst { - case ResourceStarting: - if *status == ResourceReady { - *status = ResourceStarting - } - case ResourceFailed: - if *status != ResourceFailed { - *status = ResourceFailed - } - default: - } -} - -func (rst NnfResourceStatusType) ConvertToDWSResourceStatus() dwsv1alpha2.ResourceStatus { - switch rst { - case ResourceStarting: - return dwsv1alpha2.StartingStatus - case ResourceReady: - return dwsv1alpha2.ReadyStatus - case ResourceDisabled: - return dwsv1alpha2.DisabledStatus - case ResourceNotPresent: - return dwsv1alpha2.NotPresentStatus - case ResourceOffline: - return dwsv1alpha2.OfflineStatus - case ResourceFailed: - return dwsv1alpha2.FailedStatus - default: - return dwsv1alpha2.UnknownStatus - } -} - -// StaticResourceStatus will convert a Swordfish ResourceStatus to the NNF Resource Status. 
-func StaticResourceStatus(s sf.ResourceStatus) NnfResourceStatusType { - switch s.State { - case sf.STARTING_RST: - return ResourceStarting - case sf.ENABLED_RST: - return ResourceReady - case sf.DISABLED_RST: - return ResourceDisabled - case sf.ABSENT_RST: - return ResourceNotPresent - case sf.UNAVAILABLE_OFFLINE_RST: - return ResourceOffline - } - - panic("Unknown Resource State " + string(s.State)) -} - -// ResourceStatus will convert a Swordfish ResourceStatus to the NNF Resource Status. -func ResourceStatus(s sf.ResourceStatus) NnfResourceStatusType { - switch s.State { - case sf.STARTING_RST: - return ResourceStarting - case sf.ENABLED_RST: - return ResourceReady - case sf.DISABLED_RST: - return ResourceDisabled - case sf.ABSENT_RST: - return ResourceNotPresent - case sf.UNAVAILABLE_OFFLINE_RST: - return ResourceOffline - - default: - return ResourceFailed - } -} diff --git a/api/v1alpha1/nnf_resource_type.go b/api/v1alpha1/nnf_resource_type.go deleted file mode 100644 index 5a99fc17..00000000 --- a/api/v1alpha1/nnf_resource_type.go +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2021, 2022 Hewlett Packard Enterprise Development LP - * Other additional copyright holders may be indicated within. - * - * The entirety of this work is licensed under the Apache License, - * Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. - * - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package v1alpha1 - -// NnfResourceStatus provides common fields that are included in all NNF Resources -type NnfResourceStatus struct { - // ID reflects the NNF Node unique identifier for this NNF Server resource. - ID string `json:"id,omitempty"` - - // Name reflects the common name of this NNF Server resource. - Name string `json:"name,omitempty"` - - Status NnfResourceStatusType `json:"status,omitempty"` - - Health NnfResourceHealthType `json:"health,omitempty"` -} diff --git a/api/v1alpha1/nnfaccess_types.go b/api/v1alpha1/nnfaccess_types.go deleted file mode 100644 index e72f3507..00000000 --- a/api/v1alpha1/nnfaccess_types.go +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Copyright 2021-2023 Hewlett Packard Enterprise Development LP - * Other additional copyright holders may be indicated within. - * - * The entirety of this work is licensed under the Apache License, - * Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. - * - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package v1alpha1 - -import ( - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - "github.com/DataWorkflowServices/dws/utils/updater" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// NnfAccessSpec defines the desired state of NnfAccess -type NnfAccessSpec struct { - // DesiredState is the desired state for the mounts on the client - // +kubebuilder:validation:Enum=mounted;unmounted - DesiredState string `json:"desiredState"` - - // TeardownState is the desired state of the workflow for this NNF Access resource to - // be torn down and deleted. - // +kubebuilder:validation:Enum:=PreRun;PostRun;Teardown - // +kubebuilder:validation:Type:=string - TeardownState dwsv1alpha2.WorkflowState `json:"teardownState"` - - // Target specifies which storage targets the client should mount - // - single: Only one of the storage the client can access - // - all: All of the storage the client can access - // - shared: Multiple clients access the same storage - // +kubebuilder:validation:Enum=single;all;shared - Target string `json:"target"` - - // UserID for the new mount. Currently only used for raw - UserID uint32 `json:"userID"` - - // GroupID for the new mount. Currently only used for raw - GroupID uint32 `json:"groupID"` - - // ClientReference is for a client resource. 
(DWS) Computes is the only client - // resource type currently supported - ClientReference corev1.ObjectReference `json:"clientReference,omitempty"` - - // MountPath for the storage target on the client - MountPath string `json:"mountPath,omitempty"` - - // MakeClientMounts determines whether the ClientMount resources are made, or if only - // the access list on the NnfNodeBlockStorage is updated - // +kubebuilder:default=true - MakeClientMounts bool `json:"makeClientMounts"` - - // MountPathPrefix to mount the storage target on the client when there is - // more than one mount on a client - - MountPathPrefix string `json:"mountPathPrefix,omitempty"` - - // StorageReference is the NnfStorage reference - StorageReference corev1.ObjectReference `json:"storageReference"` -} - -// NnfAccessStatus defines the observed state of NnfAccess -type NnfAccessStatus struct { - // State is the current state - // +kubebuilder:validation:Enum=mounted;unmounted - State string `json:"state"` - - // Ready signifies whether status.state has been achieved - Ready bool `json:"ready"` - - dwsv1alpha2.ResourceError `json:",inline"` -} - -//+kubebuilder:object:root=true -//+kubebuilder:unservedversion -//+kubebuilder:subresource:status -//+kubebuilder:printcolumn:name="DESIREDSTATE",type="string",JSONPath=".spec.desiredState",description="The desired state" -//+kubebuilder:printcolumn:name="STATE",type="string",JSONPath=".status.state",description="The current state" -//+kubebuilder:printcolumn:name="READY",type="boolean",JSONPath=".status.ready",description="Whether the state has been achieved" -//+kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" -//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" - -// NnfAccess is the Schema for the nnfaccesses API -type NnfAccess struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec NnfAccessSpec `json:"spec,omitempty"` - Status 
NnfAccessStatus `json:"status,omitempty"` -} - -func (a *NnfAccess) GetStatus() updater.Status[*NnfAccessStatus] { - return &a.Status -} - -//+kubebuilder:object:root=true - -// NnfAccessList contains a list of NnfAccess -type NnfAccessList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []NnfAccess `json:"items"` -} - -func (n *NnfAccessList) GetObjectList() []client.Object { - objectList := []client.Object{} - - for i := range n.Items { - objectList = append(objectList, &n.Items[i]) - } - - return objectList -} - -func init() { - SchemeBuilder.Register(&NnfAccess{}, &NnfAccessList{}) -} diff --git a/api/v1alpha1/nnfcontainerprofile_types.go b/api/v1alpha1/nnfcontainerprofile_types.go deleted file mode 100644 index f865308b..00000000 --- a/api/v1alpha1/nnfcontainerprofile_types.go +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright 2023 Hewlett Packard Enterprise Development LP - * Other additional copyright holders may be indicated within. - * - * The entirety of this work is licensed under the Apache License, - * Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. - * - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package v1alpha1 - -import ( - mpiv2beta1 "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - ContainerLabel = "nnf.cray.hpe.com/container" - ContainerUser = "user" - ContainerMPIUser = "mpiuser" -) - -// NnfContainerProfileSpec defines the desired state of NnfContainerProfile -type NnfContainerProfileData struct { - // Pinned is true if this instance is an immutable copy - // +kubebuilder:default:=false - Pinned bool `json:"pinned,omitempty"` - - // List of possible filesystems supported by this container profile - Storages []NnfContainerProfileStorage `json:"storages,omitempty"` - - // Containers are launched in the PreRun state. Allow this many seconds for the containers to - // start before declaring an error to the workflow. - // Defaults to 300 if not set. A value of 0 disables this behavior. - // +kubebuilder:default:=300 - // +kubebuilder:validation:Minimum:=0 - PreRunTimeoutSeconds *int64 `json:"preRunTimeoutSeconds,omitempty"` - - // Containers are expected to complete in the PostRun State. Allow this many seconds for the - // containers to exit before declaring an error the workflow. - // Defaults to 300 if not set. A value of 0 disables this behavior. - // +kubebuilder:default:=300 - // +kubebuilder:validation:Minimum:=0 - PostRunTimeoutSeconds *int64 `json:"postRunTimeoutSeconds,omitempty"` - - // Specifies the number of times a container will be retried upon a failure. A new pod is - // deployed on each retry. Defaults to 6 by kubernetes itself and must be set. A value of 0 - // disables retries. - // +kubebuilder:validation:Minimum:=0 - // +kubebuilder:default:=6 - RetryLimit int32 `json:"retryLimit"` - - // UserID specifies the user ID that is allowed to use this profile. If this is specified, only - // Workflows that have a matching user ID can select this profile. 
- UserID *uint32 `json:"userID,omitempty"` - - // GroupID specifies the group ID that is allowed to use this profile. If this is specified, - // only Workflows that have a matching group ID can select this profile. - GroupID *uint32 `json:"groupID,omitempty"` - - // Number of ports to open for communication with the user container. These ports are opened on - // the targeted NNF nodes and can be accessed outside of the k8s cluster (e.g. compute nodes). - // The requested ports are made available as environment variables inside the container and in - // the DWS workflow (NNF_CONTAINER_PORTS). - NumPorts int32 `json:"numPorts,omitempty"` - - // Spec to define the containers created from this profile. This is used for non-MPI containers. - // Refer to the K8s documentation for `PodSpec` for more definition: - // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec - // Either this or MPISpec must be provided, but not both. - Spec *corev1.PodSpec `json:"spec,omitempty"` - - // MPIJobSpec to define the MPI containers created from this profile. This functionality is - // provided via mpi-operator, a 3rd party tool to assist in running MPI applications across - // worker containers. - // Either this or Spec must be provided, but not both. - // - // All the fields defined drive mpi-operator behavior. See the type definition of MPISpec for - // more detail: - // https://github.com/kubeflow/mpi-operator/blob/v0.4.0/pkg/apis/kubeflow/v2beta1/types.go#L137 - // - // Note: most of these fields are fully customizable with a few exceptions. 
These fields are - // overridden by NNF software to ensure proper behavior to interface with the DWS workflow - // - Replicas - // - RunPolicy.BackoffLimit (this is set above by `RetryLimit`) - // - Worker/Launcher.RestartPolicy - MPISpec *mpiv2beta1.MPIJobSpec `json:"mpiSpec,omitempty"` -} - -// NnfContainerProfileStorage defines the mount point information that will be available to the -// container -type NnfContainerProfileStorage struct { - // Name specifies the name of the mounted filesystem; must match the user supplied #DW directive - Name string `json:"name"` - - // Optional designates that this filesystem is available to be mounted, but can be ignored by - // the user not supplying this filesystem in the #DW directives - //+kubebuilder:default:=false - Optional bool `json:"optional"` - - // For DW_GLOBAL_ (global lustre) storages, the access mode must match what is configured in - // the LustreFilesystem resource for the namespace. Defaults to `ReadWriteMany` for global - // lustre, otherwise empty. 
- PVCMode corev1.PersistentVolumeAccessMode `json:"pvcMode,omitempty"` -} - -// +kubebuilder:object:root=true -// +kubebuilder:unservedversion - -// NnfContainerProfile is the Schema for the nnfcontainerprofiles API -type NnfContainerProfile struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Data NnfContainerProfileData `json:"data"` -} - -// +kubebuilder:object:root=true - -// NnfContainerProfileList contains a list of NnfContainerProfile -type NnfContainerProfileList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []NnfContainerProfile `json:"items"` -} - -func init() { - SchemeBuilder.Register(&NnfContainerProfile{}, &NnfContainerProfileList{}) -} diff --git a/api/v1alpha1/nnfdatamovement_types.go b/api/v1alpha1/nnfdatamovement_types.go deleted file mode 100644 index 6488e521..00000000 --- a/api/v1alpha1/nnfdatamovement_types.go +++ /dev/null @@ -1,289 +0,0 @@ -/* - * Copyright 2021-2023 Hewlett Packard Enterprise Development LP - * Other additional copyright holders may be indicated within. - * - * The entirety of this work is licensed under the Apache License, - * Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. - * - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package v1alpha1 - -import ( - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -const ( - // The required namespace for an NNF Data Movement operation. This is for system wide (lustre) - // data movement. Individual nodes may also perform data movement in which case they use the - // NNF Node Name as the namespace. - DataMovementNamespace = "nnf-dm-system" - - // The namespace for NnfDataMovementProfiles that are not pinned. - DataMovementProfileNamespace = "nnf-system" -) - -// NnfDataMovementSpec defines the desired state of NnfDataMovement -type NnfDataMovementSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file - - // Source describes the source of the data movement operation - Source *NnfDataMovementSpecSourceDestination `json:"source,omitempty"` - - // Destination describes the destination of the data movement operation - Destination *NnfDataMovementSpecSourceDestination `json:"destination,omitempty"` - - // User Id specifies the user ID for the data movement operation. This value is used - // in conjunction with the group ID to ensure the user has valid permissions to perform - // the data movement operation. - UserId uint32 `json:"userId,omitempty"` - - // Group Id specifies the group ID for the data movement operation. This value is used - // in conjunction with the user ID to ensure the user has valid permissions to perform - // the data movement operation. - GroupId uint32 `json:"groupId,omitempty"` - - // Set to true if the data movement operation should be canceled. - // +kubebuilder:default:=false - Cancel bool `json:"cancel,omitempty"` - - // ProfileReference is an object reference to an NnfDataMovementProfile that is used to - // configure data movement. If empty, the default profile is used. 
- ProfileReference corev1.ObjectReference `json:"profileReference,omitempty"` - - // User defined configuration on how data movement should be performed. This overrides the - // configuration defined in the supplied ProfileReference/NnfDataMovementProfile. These values - // are typically set by the Copy Offload API. - UserConfig *NnfDataMovementConfig `json:"userConfig,omitempty"` -} - -// NnfDataMovementSpecSourceDestination defines the desired source or destination of data movement -type NnfDataMovementSpecSourceDestination struct { - - // Path describes the location of the user data relative to the storage instance - Path string `json:"path,omitempty"` - - // Storage describes the storage backing this data movement specification; Storage can reference - // either NNF storage or global Lustre storage depending on the object references Kind field. - StorageReference corev1.ObjectReference `json:"storageReference,omitempty"` -} - -// NnfDataMovementConfig provides a way for a user to override the data movement behavior on a -// per DM basis. -type NnfDataMovementConfig struct { - - // Fake the Data Movement operation. The system "performs" Data Movement but the command to do so - // is trivial. This means a Data Movement request is still submitted but the IO is skipped. - // +kubebuilder:default:=false - Dryrun bool `json:"dryrun,omitempty"` - - // Extra options to pass to the mpirun command (used to perform data movement). - MpirunOptions string `json:"mpirunOptions,omitempty"` - - // Extra options to pass to the dcp command (used to perform data movement). - DcpOptions string `json:"dcpOptions,omitempty"` - - // If true, enable the command's stdout to be saved in the log when the command completes - // successfully. On failure, the output is always logged. - // Note: Enabling this option may degrade performance. 
- // +kubebuilder:default:=false - LogStdout bool `json:"logStdout,omitempty"` - - // Similar to LogStdout, store the command's stdout in Status.Message when the command completes - // successfully. On failure, the output is always stored. - // Note: Enabling this option may degrade performance. - // +kubebuilder:default:=false - StoreStdout bool `json:"storeStdout,omitempty"` - - // The number of slots specified in the MPI hostfile. A value of 0 disables the use of slots in - // the hostfile. Nil will defer to the value specified in the NnfDataMovementProfile. - Slots *int `json:"slots,omitempty"` - - // The number of max_slots specified in the MPI hostfile. A value of 0 disables the use of slots - // in the hostfile. Nil will defer to the value specified in the NnfDataMovementProfile. - MaxSlots *int `json:"maxSlots,omitempty"` -} - -// NnfDataMovementCommandStatus defines the observed status of the underlying data movement -// command (MPI File Utils' `dcp` command). -type NnfDataMovementCommandStatus struct { - // The command that was executed during data movement. - Command string `json:"command,omitempty"` - - // ElapsedTime reflects the elapsed time since the underlying data movement command started. - ElapsedTime metav1.Duration `json:"elapsedTime,omitempty"` - - // ProgressPercentage refects the progress of the underlying data movement command as captured from - // standard output. A best effort is made to parse the command output as a percentage. If no - // progress has yet to be measured than this field is omitted. If the latest command output does - // not contain a valid percentage, then the value is unchanged from the previously parsed value. - ProgressPercentage *int32 `json:"progress,omitempty"` - - // LastMessage reflects the last message received over standard output or standard error as - // captured by the underlying data movement command. 
- LastMessage string `json:"lastMessage,omitempty"` - - // LastMessageTime reflects the time at which the last message was received over standard output - // or standard error by the underlying data movement command. - LastMessageTime metav1.MicroTime `json:"lastMessageTime,omitempty"` - - // Seconds is parsed from the dcp output when the command is finished. - Seconds string `json:"seconds,omitempty"` - - // Items is parsed from the dcp output when the command is finished. This is a total of - // the number of directories, files, and links that dcp copied. - Items *int32 `json:"items,omitempty"` - - // Directories is parsed from the dcp output when the command is finished. This is the number of - // directories that dcp copied. Note: This value may be inflated due to NNF index mount - // directories when copying from XFS or GFS2 filesystems. - Directories *int32 `json:"directories,omitempty"` - - // Files is parsed from the dcp output when the command is finished. This is the number of files - // that dcp copied. - Files *int32 `json:"files,omitempty"` - - // Links is parsed from the dcp output when the command is finished. This is the number of links - // that dcp copied. - Links *int32 `json:"links,omitempty"` - - // Data is parsed from the dcp output when the command is finished. This is the total amount of - // data copied by dcp. - Data string `json:"data,omitempty"` - - // Rate is parsed from the dcp output when the command is finished. This is transfer rate of the - // data copied by dcp. - Rate string `json:"rate,omitempty"` -} - -// NnfDataMovementStatus defines the observed state of NnfDataMovement -type NnfDataMovementStatus struct { - // Current state of data movement. - // +kubebuilder:validation:Enum=Starting;Running;Finished - State string `json:"state,omitempty"` - - // Status of the current state. 
- // +kubebuilder:validation:Enum=Success;Failed;Invalid;Cancelled - Status string `json:"status,omitempty"` - - // Message contains any text that explains the Status. If Data Movement failed or storeStdout is - // enabled, this will contain the command's output. - Message string `json:"message,omitempty"` - - // StartTime reflects the time at which the Data Movement operation started. - StartTime *metav1.MicroTime `json:"startTime,omitempty"` - - // EndTime reflects the time at which the Data Movement operation ended. - EndTime *metav1.MicroTime `json:"endTime,omitempty"` - - // Restarts contains the number of restarts of the Data Movement operation. - Restarts int `json:"restarts,omitempty"` - - // CommandStatus reflects the current status of the underlying Data Movement command - // as it executes. The command status is polled at a certain frequency to avoid excessive - // updates to the Data Movement resource. - CommandStatus *NnfDataMovementCommandStatus `json:"commandStatus,omitempty"` - - dwsv1alpha2.ResourceError `json:",inline"` -} - -// Types describing the various data movement status conditions. -const ( - DataMovementConditionTypeStarting = "Starting" - DataMovementConditionTypeRunning = "Running" - DataMovementConditionTypeFinished = "Finished" -) - -// Reasons describing the various data movement status conditions. 
Must be -// in CamelCase format (see metav1.Condition) -const ( - DataMovementConditionReasonSuccess = "Success" - DataMovementConditionReasonFailed = "Failed" - DataMovementConditionReasonInvalid = "Invalid" - DataMovementConditionReasonCancelled = "Cancelled" -) - -//+kubebuilder:object:root=true -//+kubebuilder:unservedversion -//+kubebuilder:subresource:status -//+kubebuilder:printcolumn:name="STATE",type="string",JSONPath=".status.state",description="Current state" -//+kubebuilder:printcolumn:name="STATUS",type="string",JSONPath=".status.status",description="Status of current state" -//+kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" -//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" - -// NnfDataMovement is the Schema for the nnfdatamovements API -type NnfDataMovement struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec NnfDataMovementSpec `json:"spec,omitempty"` - Status NnfDataMovementStatus `json:"status,omitempty"` -} - -//+kubebuilder:object:root=true - -// NnfDataMovementList contains a list of NnfDataMovement -type NnfDataMovementList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []NnfDataMovement `json:"items"` -} - -func (n *NnfDataMovementList) GetObjectList() []client.Object { - objectList := []client.Object{} - - for i := range n.Items { - objectList = append(objectList, &n.Items[i]) - } - - return objectList -} - -const ( - // DataMovementTeardownStateLabel is the label applied to Data Movement and related resources that describes - // the workflow state when the resource is no longer need and can be safely deleted. - DataMovementTeardownStateLabel = "nnf.cray.hpe.com/teardown_state" - - // DataMovementInitiatorLabel is the label applied to Data Movement resources that describes the origin of - // data movement request. 
This would be from a copy_in/copy_out directive or from a compute node via the - // Copy Offload API (i.e. nnf-dm daemon). - DataMovementInitiatorLabel = "dm.cray.hpe.com/initiator" -) - -func AddDataMovementTeardownStateLabel(object metav1.Object, state dwsv1alpha2.WorkflowState) { - labels := object.GetLabels() - if labels == nil { - labels = make(map[string]string) - } - - labels[DataMovementTeardownStateLabel] = string(state) - object.SetLabels(labels) -} - -func AddDataMovementInitiatorLabel(object metav1.Object, initiator string) { - labels := object.GetLabels() - if labels == nil { - labels = make(map[string]string) - } - - labels[DataMovementInitiatorLabel] = initiator - object.SetLabels(labels) -} - -func init() { - SchemeBuilder.Register(&NnfDataMovement{}, &NnfDataMovementList{}) -} diff --git a/api/v1alpha1/nnfdatamovementmanager_types.go b/api/v1alpha1/nnfdatamovementmanager_types.go deleted file mode 100644 index 3d01a4b8..00000000 --- a/api/v1alpha1/nnfdatamovementmanager_types.go +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright 2022-2023 Hewlett Packard Enterprise Development LP - * Other additional copyright holders may be indicated within. - * - * The entirety of this work is licensed under the Apache License, - * Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. - * - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package v1alpha1 - -import ( - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/DataWorkflowServices/dws/utils/updater" -) - -const ( - DataMovementWorkerLabel = "dm.cray.hpe.com/worker" - - // The name of the expected Data Movement manager. This is to ensure Data Movement is ready in - // the DataIn/DataOut stages before attempting data movement operations. - DataMovementManagerName = "nnf-dm-manager-controller-manager" -) - -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. - -// NnfDataMovementManagerSpec defines the desired state of NnfDataMovementManager -type NnfDataMovementManagerSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file - - // Selector defines the pod selector used in scheduling the worker nodes. This value is duplicated - // to the template.spec.metadata.labels to satisfy the requirements of the worker's Daemon Set. - Selector metav1.LabelSelector `json:"selector"` - - // Template defines the pod template that is used for the basis of the worker Daemon Set that - // manages the per node data movement operations. - Template corev1.PodTemplateSpec `json:"template"` - - // UpdateStrategy defines the UpdateStrategy that is used for the basis of the worker Daemon Set - // that manages the per node data movement operations. - UpdateStrategy appsv1.DaemonSetUpdateStrategy `json:"updateStrategy"` - - // Host Path defines the directory location of shared mounts on an individual worker node. - HostPath string `json:"hostPath"` - - // Mount Path defines the location within the container at which the Host Path volume should be mounted. 
- MountPath string `json:"mountPath"` -} - -// NnfDataMovementManagerStatus defines the observed state of NnfDataMovementManager -type NnfDataMovementManagerStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file - - // Ready indicates that the Data Movement Manager has achieved the desired readiness state - // and all managed resources are initialized. - // +kubebuilder:default:=false - Ready bool `json:"ready"` -} - -//+kubebuilder:object:root=true -//+kubebuilder:unservedversion -//+kubebuilder:subresource:status -//+kubebuilder:printcolumn:name="READY",type="boolean",JSONPath=".status.ready",description="True if manager readied all resoures" -//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" - -// NnfDataMovementManager is the Schema for the nnfdatamovementmanagers API -type NnfDataMovementManager struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec NnfDataMovementManagerSpec `json:"spec,omitempty"` - Status NnfDataMovementManagerStatus `json:"status,omitempty"` -} - -func (m *NnfDataMovementManager) GetStatus() updater.Status[*NnfDataMovementManagerStatus] { - return &m.Status -} - -//+kubebuilder:object:root=true - -// NnfDataMovementManagerList contains a list of NnfDataMovementManager -type NnfDataMovementManagerList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []NnfDataMovementManager `json:"items"` -} - -func init() { - SchemeBuilder.Register(&NnfDataMovementManager{}, &NnfDataMovementManagerList{}) -} diff --git a/api/v1alpha1/nnfdatamovementprofile_types.go b/api/v1alpha1/nnfdatamovementprofile_types.go deleted file mode 100644 index d087306b..00000000 --- a/api/v1alpha1/nnfdatamovementprofile_types.go +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright 2024 Hewlett Packard Enterprise Development LP - * Other 
additional copyright holders may be indicated within. - * - * The entirety of this work is licensed under the Apache License, - * Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. - * - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// NnfDataMovementProfileData defines the desired state of NnfDataMovementProfile -type NnfDataMovementProfileData struct { - - // Default is true if this instance is the default resource to use - // +kubebuilder:default:=false - Default bool `json:"default,omitempty"` - - // Pinned is true if this instance is an immutable copy - // +kubebuilder:default:=false - Pinned bool `json:"pinned,omitempty"` - - // Slots is the number of slots specified in the MPI hostfile. A value of 0 disables the use of - // slots in the hostfile. The hostfile is used for both `statCommand` and `Command`. - // +kubebuilder:default:=8 - // +kubebuilder:validation:Minimum:=0 - Slots int `json:"slots"` - - // MaxSlots is the number of max_slots specified in the MPI hostfile. A value of 0 disables the - // use of max_slots in the hostfile. The hostfile is used for both `statCommand` and `Command`. - // +kubebuilder:default:=0 - // +kubebuilder:validation:Minimum:=0 - MaxSlots int `json:"maxSlots"` - - // Command to execute to perform data movement. $VARS are replaced by the nnf software and must - // be present in the command. - // Available $VARS: - // HOSTFILE: hostfile that is created and used for mpirun. 
Contains a list of hosts and the - // slots/max_slots for each host. This hostfile is created at `/tmp//hostfile` - // UID: User ID that is inherited from the Workflow - // GID: Group ID that is inherited from the Workflow - // SRC: source for the data movement - // DEST destination for the data movement - // +kubebuilder:default:="ulimit -n 2048 && mpirun --allow-run-as-root --hostfile $HOSTFILE dcp --progress 1 --uid $UID --gid $GID $SRC $DEST" - Command string `json:"command"` - - // If true, enable the command's stdout to be saved in the log when the command completes - // successfully. On failure, the output is always logged. - // +kubebuilder:default:=false - LogStdout bool `json:"logStdout,omitempty"` - - // Similar to logStdout, store the command's stdout in Status.Message when the command completes - // successfully. On failure, the output is always stored. - // +kubebuilder:default:=false - StoreStdout bool `json:"storeStdout,omitempty"` - - // NnfDataMovement resources have the ability to collect and store the progress percentage and the - // last few lines of output in the CommandStatus field. This number is used for the interval to collect - // the progress data. `dcp --progress N` must be included in the data movement command in order for - // progress to be collected. A value of 0 disables this functionality. - // +kubebuilder:default:=5 - // +kubebuilder:validation:Minimum:=0 - ProgressIntervalSeconds int `json:"progressIntervalSeconds,omitempty"` - - // CreateDestDir will ensure that the destination directory exists before performing data - // movement. This will cause a number of stat commands to determine the source and destination - // file types, so that the correct pathing for the destination can be determined. Then, a mkdir - // is issued. - // +kubebuilder:default:=true - CreateDestDir bool `json:"createDestDir"` - - // If CreateDestDir is true, then use StatCommand to perform the stat commands. 
- // Use setpriv to stat the path with the specified UID/GID. - // Available $VARS: - // HOSTFILE: hostfile that is created and used for mpirun. Contains a list of hosts and the - // slots/max_slots for each host. This hostfile is created at - // `/tmp//hostfile`. This is the same hostfile used as the one for Command. - // UID: User ID that is inherited from the Workflow - // GID: Group ID that is inherited from the Workflow - // PATH: Path to stat - // +kubebuilder:default:="mpirun --allow-run-as-root -np 1 --hostfile $HOSTFILE -- setpriv --euid $UID --egid $GID --clear-groups stat --cached never -c '%F' $PATH" - StatCommand string `json:"statCommand"` -} - -// +kubebuilder:object:root=true -// +kubebuilder:unservedversion -// +kubebuilder:printcolumn:name="DEFAULT",type="boolean",JSONPath=".data.default",description="True if this is the default instance" -// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" - -// NnfDataMovementProfile is the Schema for the nnfdatamovementprofiles API -type NnfDataMovementProfile struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Data NnfDataMovementProfileData `json:"data,omitempty"` -} - -// +kubebuilder:object:root=true - -// NnfDataMovementProfileList contains a list of NnfDataMovementProfile -type NnfDataMovementProfileList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []NnfDataMovementProfile `json:"items"` -} - -func init() { - SchemeBuilder.Register(&NnfDataMovementProfile{}, &NnfDataMovementProfileList{}) -} diff --git a/api/v1alpha1/nnflustremgt_types.go b/api/v1alpha1/nnflustremgt_types.go deleted file mode 100644 index 25aeea17..00000000 --- a/api/v1alpha1/nnflustremgt_types.go +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright 2024 Hewlett Packard Enterprise Development LP - * Other additional copyright holders may be indicated within. 
- * - * The entirety of this work is licensed under the Apache License, - * Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. - * - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package v1alpha1 - -import ( - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - "github.com/DataWorkflowServices/dws/utils/updater" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// NnfLustreMGTSpec defines the desired state of NnfLustreMGT -type NnfLustreMGTSpec struct { - // Addresses is the list of LNet addresses for the MGT - Addresses []string `json:"addresses"` - - // FsNameBlackList is a list of fsnames that can't be used. This may be - // necessary if the MGT hosts file systems external to Rabbit - FsNameBlackList []string `json:"fsNameBlackList,omitempty"` - - // FsNameStart is the starting fsname to be used - // +kubebuilder:validation:MaxLength:=8 - // +kubebuilder:validation:MinLength:=8 - FsNameStart string `json:"fsNameStart,omitempty"` - - // FsNameStartReference can be used to add a configmap where the starting fsname is - // stored. If this reference is set, it takes precendence over FsNameStart. The configmap - // will be updated with the next available fsname anytime an fsname is used. 
- FsNameStartReference corev1.ObjectReference `json:"fsNameStartReference,omitempty"` - - // ClaimList is the list of currently in use fsnames - ClaimList []corev1.ObjectReference `json:"claimList,omitempty"` -} - -// NnfLustreMGTStatus defines the current state of NnfLustreMGT -type NnfLustreMGTStatus struct { - // FsNameNext is the next available fsname that hasn't been used - // +kubebuilder:validation:MaxLength:=8 - // +kubebuilder:validation:MinLength:=8 - FsNameNext string `json:"fsNameNext,omitempty"` - - // ClaimList is the list of currently in use fsnames - ClaimList []NnfLustreMGTStatusClaim `json:"claimList,omitempty"` - - dwsv1alpha2.ResourceError `json:",inline"` -} - -type NnfLustreMGTStatusClaim struct { - Reference corev1.ObjectReference `json:"reference,omitempty"` - FsName string `json:"fsname,omitempty"` -} - -// +kubebuilder:object:root=true -// +kubebuilder:unservedversion -// +kubebuilder:subresource:status -// NnfLustreMGT is the Schema for the nnfstorageprofiles API -type NnfLustreMGT struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec NnfLustreMGTSpec `json:"spec,omitempty"` - Status NnfLustreMGTStatus `json:"status,omitempty"` -} - -func (a *NnfLustreMGT) GetStatus() updater.Status[*NnfLustreMGTStatus] { - return &a.Status -} - -//+kubebuilder:object:root=true - -// NnfLustreMGTList contains a list of NnfLustreMGT -type NnfLustreMGTList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []NnfLustreMGT `json:"items"` -} - -func (n *NnfLustreMGTList) GetObjectList() []client.Object { - objectList := []client.Object{} - - for i := range n.Items { - objectList = append(objectList, &n.Items[i]) - } - - return objectList -} - -func init() { - SchemeBuilder.Register(&NnfLustreMGT{}, &NnfLustreMGTList{}) -} diff --git a/api/v1alpha1/nnfnode_types.go b/api/v1alpha1/nnfnode_types.go deleted file mode 100644 index 93c3d56b..00000000 --- 
a/api/v1alpha1/nnfnode_types.go +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright 2021-2023 Hewlett Packard Enterprise Development LP - * Other additional copyright holders may be indicated within. - * - * The entirety of this work is licensed under the Apache License, - * Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. - * - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package v1alpha1 - -import ( - "github.com/DataWorkflowServices/dws/utils/updater" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
- -// NnfNodeSpec defines the desired state of NNF Node -type NnfNodeSpec struct { - // Important: Run "make" to regenerate code after modifying this file - - // The unique name for this NNF Node - Name string `json:"name,omitempty"` - - // Pod name for this NNF Node - Pod string `json:"pod,omitempty"` - - // State reflects the desired state of this NNF Node resource - // +kubebuilder:validation:Enum=Enable;Disable - State NnfResourceStateType `json:"state"` -} - -// NnfNodeStatus defines the observed status of NNF Node -type NnfNodeStatus struct { - // Important: Run "make" to regenerate code after modifying this file - - // Status reflects the current status of the NNF Node - Status NnfResourceStatusType `json:"status,omitempty"` - - Health NnfResourceHealthType `json:"health,omitempty"` - - // Fenced is true when the NNF Node is fenced by the STONITH agent, and false otherwise. - Fenced bool `json:"fenced,omitempty"` - - // LNetNid is the LNet address for the NNF node - LNetNid string `json:"lnetNid,omitempty"` - - Capacity int64 `json:"capacity,omitempty"` - CapacityAllocated int64 `json:"capacityAllocated,omitempty"` - - Servers []NnfServerStatus `json:"servers,omitempty"` - - Drives []NnfDriveStatus `json:"drives,omitempty"` -} - -// NnfServerStatus defines the observed status of servers connected to this NNF Node -type NnfServerStatus struct { - Hostname string `json:"hostname,omitempty"` - - NnfResourceStatus `json:",inline"` -} - -// NnfDriveStatus defines the observe status of drives connected to this NNF Node -type NnfDriveStatus struct { - // Model is the manufacturer information about the device - Model string `json:"model,omitempty"` - - // The serial number for this storage controller. - SerialNumber string `json:"serialNumber,omitempty"` - - // The firmware version of this storage controller. - FirmwareVersion string `json:"firmwareVersion,omitempty"` - - // Physical slot location of the storage controller. 
- Slot string `json:"slot,omitempty"` - - // Capacity in bytes of the device. The full capacity may not - // be usable depending on what the storage driver can provide. - Capacity int64 `json:"capacity,omitempty"` - - // WearLevel in percent for SSDs - WearLevel int64 `json:"wearLevel,omitempty"` - - NnfResourceStatus `json:",inline"` -} - -//+kubebuilder:object:root=true -//+kubebuilder:unservedversion -//+kubebuilder:subresource:status -//+kubebuilder:printcolumn:name="STATE",type="string",JSONPath=".spec.state",description="Current desired state" -//+kubebuilder:printcolumn:name="HEALTH",type="string",JSONPath=".status.health",description="Health of node" -//+kubebuilder:printcolumn:name="STATUS",type="string",JSONPath=".status.status",description="Current status of node" -//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" -//+kubebuilder:printcolumn:name="POD",type="string",JSONPath=".spec.pod",description="Parent pod name",priority=1 - -// NnfNode is the Schema for the NnfNode API -type NnfNode struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec NnfNodeSpec `json:"spec,omitempty"` - Status NnfNodeStatus `json:"status,omitempty"` -} - -func (n *NnfNode) GetStatus() updater.Status[*NnfNodeStatus] { - return &n.Status -} - -//+kubebuilder:object:root=true - -// NnfNodeList contains a list of NNF Nodes -type NnfNodeList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []NnfNode `json:"items"` -} - -func init() { - SchemeBuilder.Register(&NnfNode{}, &NnfNodeList{}) -} diff --git a/api/v1alpha1/nnfnodeblockstorage_types.go b/api/v1alpha1/nnfnodeblockstorage_types.go deleted file mode 100644 index 3a84977d..00000000 --- a/api/v1alpha1/nnfnodeblockstorage_types.go +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright 2023 Hewlett Packard Enterprise Development LP - * Other additional copyright holders may be indicated within. 
- * - * The entirety of this work is licensed under the Apache License, - * Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. - * - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package v1alpha1 - -import ( - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - "github.com/DataWorkflowServices/dws/utils/updater" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -type NnfNodeBlockStorageAllocationSpec struct { - // Aggregate capacity of the block devices for each allocation - Capacity int64 `json:"capacity,omitempty"` - - // List of nodes where /dev devices should be created - Access []string `json:"access,omitempty"` -} - -// NnfNodeBlockStorageSpec defines the desired storage attributes on a NNF Node. -// Storage spec are created on request of the user and fullfilled by the NNF Node Controller. 
-type NnfNodeBlockStorageSpec struct { - // SharedAllocation is used when a single NnfNodeBlockStorage allocation is used by multiple NnfNodeStorage allocations - SharedAllocation bool `json:"sharedAllocation"` - - // Allocations is the list of storage allocations to make - Allocations []NnfNodeBlockStorageAllocationSpec `json:"allocations,omitempty"` -} - -type NnfNodeBlockStorageStatus struct { - // Allocations is the list of storage allocations that were made - Allocations []NnfNodeBlockStorageAllocationStatus `json:"allocations,omitempty"` - - dwsv1alpha2.ResourceError `json:",inline"` - - // PodStartTime is the value of pod.status.containerStatuses[].state.running.startedAt from the pod that did - // last successful full reconcile of the NnfNodeBlockStorage. This is used to tell whether the /dev paths - // listed in the status section are from the current boot of the node. - PodStartTime metav1.Time `json:"podStartTime,omitempty"` - - Ready bool `json:"ready"` -} - -type NnfNodeBlockStorageDeviceStatus struct { - // NQN of the base NVMe device - NQN string `json:"NQN"` - - // Id of the Namespace on the NVMe device (e.g., "2") - NamespaceId string `json:"namespaceId"` - - // Total capacity allocated for the storage. This may differ from the requested storage - // capacity as the system may round up to the requested capacity to satisify underlying - // storage requirements (i.e. block size / stripe size). 
- CapacityAllocated int64 `json:"capacityAllocated,omitempty"` -} - -type NnfNodeBlockStorageAccessStatus struct { - // /dev paths for each of the block devices - DevicePaths []string `json:"devicePaths,omitempty"` - - // Redfish ID for the storage group - StorageGroupId string `json:"storageGroupId,omitempty"` -} - -type NnfNodeBlockStorageAllocationStatus struct { - // Accesses is a map of node name to the access status - Accesses map[string]NnfNodeBlockStorageAccessStatus `json:"accesses,omitempty"` - - // List of NVMe namespaces used by this allocation - Devices []NnfNodeBlockStorageDeviceStatus `json:"devices,omitempty"` - - // Total capacity allocated for the storage. This may differ from the requested storage - // capacity as the system may round up to the requested capacity to satisify underlying - // storage requirements (i.e. block size / stripe size). - CapacityAllocated int64 `json:"capacityAllocated,omitempty"` - - // Redfish ID for the storage pool - StoragePoolId string `json:"storagePoolId,omitempty"` -} - -// +kubebuilder:object:root=true -// +kubebuilder:unservedversion -// +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.ready" -// +kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" -// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" -type NnfNodeBlockStorage struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec NnfNodeBlockStorageSpec `json:"spec,omitempty"` - Status NnfNodeBlockStorageStatus `json:"status,omitempty"` -} - -func (ns *NnfNodeBlockStorage) GetStatus() updater.Status[*NnfNodeBlockStorageStatus] { - return &ns.Status -} - -// +kubebuilder:object:root=true - -// NnfNodeBlockStorageList contains a list of NNF Nodes -type NnfNodeBlockStorageList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items 
[]NnfNodeBlockStorage `json:"items"` -} - -func (n *NnfNodeBlockStorageList) GetObjectList() []client.Object { - objectList := []client.Object{} - - for i := range n.Items { - objectList = append(objectList, &n.Items[i]) - } - - return objectList -} - -func init() { - SchemeBuilder.Register(&NnfNodeBlockStorage{}, &NnfNodeBlockStorageList{}) -} diff --git a/api/v1alpha1/nnfnodeecdata_types.go b/api/v1alpha1/nnfnodeecdata_types.go deleted file mode 100644 index ede24f2e..00000000 --- a/api/v1alpha1/nnfnodeecdata_types.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright 2022 Hewlett Packard Enterprise Development LP - * Other additional copyright holders may be indicated within. - * - * The entirety of this work is licensed under the Apache License, - * Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. - * - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
- -// NnfNodeECDataSpec defines the desired state of NnfNodeECData -type NnfNodeECDataSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file -} - -// NnfNodeECDataStatus defines the observed state of NnfNodeECData -type NnfNodeECDataStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file - - Data map[string]NnfNodeECPrivateData `json:"data,omitempty"` -} - -type NnfNodeECPrivateData map[string]string - -//+kubebuilder:object:root=true -//+kubebuilder:unservedversion -//+kubebuilder:subresource:status - -// NnfNodeECData is the Schema for the nnfnodeecdata API -type NnfNodeECData struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec NnfNodeECDataSpec `json:"spec,omitempty"` - Status NnfNodeECDataStatus `json:"status,omitempty"` -} - -//+kubebuilder:object:root=true - -// NnfNodeECDataList contains a list of NnfNodeECData -type NnfNodeECDataList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []NnfNodeECData `json:"items"` -} - -func init() { - SchemeBuilder.Register(&NnfNodeECData{}, &NnfNodeECDataList{}) -} diff --git a/api/v1alpha1/nnfnodestorage_types.go b/api/v1alpha1/nnfnodestorage_types.go deleted file mode 100644 index ef4fbf9f..00000000 --- a/api/v1alpha1/nnfnodestorage_types.go +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Copyright 2021-2023 Hewlett Packard Enterprise Development LP - * Other additional copyright holders may be indicated within. - * - * The entirety of this work is licensed under the Apache License, - * Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
- * - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package v1alpha1 - -import ( - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - "github.com/DataWorkflowServices/dws/utils/updater" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// IMPORTANT: Run "make" to regenerate code after modifying this file -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. - -// NnfNodeStorageSpec defines the desired storage attributes on a NNF Node. -// Storage spec are created on bequest of the user and fullfilled by the NNF Node Controller. -type NnfNodeStorageSpec struct { - // Count is the number of allocations to make on this node. All of the allocations will - // be created with the same parameters - // +kubebuilder:validation:Minimum:=0 - Count int `json:"count"` - - // SharedAllocation is used when a single NnfNodeBlockStorage allocation is used by multiple NnfNodeStorage allocations - SharedAllocation bool `json:"sharedAllocation"` - - // Capacity of an individual allocation - Capacity int64 `json:"capacity,omitempty"` - - // User ID for file system - UserID uint32 `json:"userID"` - - // Group ID for file system - GroupID uint32 `json:"groupID"` - - // FileSystemType defines the type of the desired filesystem, or raw - // block device. 
- // +kubebuilder:validation:Enum=raw;lvm;zfs;xfs;gfs2;lustre - // +kubebuilder:default:=raw - FileSystemType string `json:"fileSystemType,omitempty"` - - // LustreStorageSpec describes the Lustre target created here, if - // FileSystemType specifies a Lustre target. - LustreStorage LustreStorageSpec `json:"lustreStorage,omitempty"` - - // BlockReference is an object reference to an NnfNodeBlockStorage - BlockReference corev1.ObjectReference `json:"blockReference,omitempty"` -} - -// LustreStorageSpec describes the Lustre target to be created here. -type LustreStorageSpec struct { - // FileSystemName is the fsname parameter for the Lustre filesystem. - // +kubebuilder:validation:MaxLength:=8 - FileSystemName string `json:"fileSystemName,omitempty"` - - // TargetType is the type of Lustre target to be created. - // +kubebuilder:validation:Enum=mgt;mdt;mgtmdt;ost - TargetType string `json:"targetType,omitempty"` - - // StartIndex is used to order a series of MDTs or OSTs. This is used only - // when creating MDT and OST targets. If count in the NnfNodeStorageSpec is more - // than 1, then StartIndex is the index of the first allocation, and the indexes - // increment from there. - // +kubebuilder:validation:Minimum:=0 - StartIndex int `json:"startIndex,omitempty"` - - // MgsAddress is the NID of the MGS to use. This is used only when - // creating MDT and OST targets. - MgsAddress string `json:"mgsAddress,omitempty"` - - // BackFs is the type of backing filesystem to use. 
- // +kubebuilder:validation:Enum=ldiskfs;zfs - BackFs string `json:"backFs,omitempty"` -} - -// NnfNodeStorageStatus defines the status for NnfNodeStorage -type NnfNodeStorageStatus struct { - // Allocations is the list of storage allocations that were made - Allocations []NnfNodeStorageAllocationStatus `json:"allocations,omitempty"` - - Ready bool `json:"ready,omitempty"` - - dwsv1alpha2.ResourceError `json:",inline"` -} - -// NnfNodeStorageAllocationStatus defines the allocation status for each allocation in the NnfNodeStorage -type NnfNodeStorageAllocationStatus struct { - // Name of the LVM VG - VolumeGroup string `json:"volumeGroup,omitempty"` - - // Name of the LVM LV - LogicalVolume string `json:"logicalVolume,omitempty"` - - Ready bool `json:"ready,omitempty"` -} - -// +kubebuilder:object:root=true -// +kubebuilder:unservedversion -// +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.ready" -// +kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" -// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" -// NnfNodeStorage is the Schema for the NnfNodeStorage API -type NnfNodeStorage struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec NnfNodeStorageSpec `json:"spec,omitempty"` - Status NnfNodeStorageStatus `json:"status,omitempty"` -} - -func (ns *NnfNodeStorage) GetStatus() updater.Status[*NnfNodeStorageStatus] { - return &ns.Status -} - -//+kubebuilder:object:root=true - -// NnfNodeStorageList contains a list of NNF Nodes -type NnfNodeStorageList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []NnfNodeStorage `json:"items"` -} - -func (n *NnfNodeStorageList) GetObjectList() []client.Object { - objectList := []client.Object{} - - for i := range n.Items { - objectList = append(objectList, &n.Items[i]) - } - - return objectList -} - -func 
init() { - SchemeBuilder.Register(&NnfNodeStorage{}, &NnfNodeStorageList{}) -} diff --git a/api/v1alpha1/nnfportmanager_types.go b/api/v1alpha1/nnfportmanager_types.go deleted file mode 100644 index 0685330e..00000000 --- a/api/v1alpha1/nnfportmanager_types.go +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright 2023 Hewlett Packard Enterprise Development LP - * Other additional copyright holders may be indicated within. - * - * The entirety of this work is licensed under the Apache License, - * Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. - * - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package v1alpha1 - -import ( - "github.com/DataWorkflowServices/dws/utils/updater" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. - -// NnfPortManagerAllocationSpec defines the desired state for a single port allocation -type NnfPortManagerAllocationSpec struct { - // Requester is an object reference to the requester of a ports. - Requester corev1.ObjectReference `json:"requester"` - - // Count is the number of desired ports the requester needs. The port manager - // will attempt to allocate this many ports. 
- // +kubebuilder:default:=1 - Count int `json:"count"` -} - -// NnfPortManagerSpec defines the desired state of NnfPortManager -type NnfPortManagerSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file - - // SystemConfiguration is an object reference to the system configuration. The - // Port Manager will use the available ports defined in the system configuration. - SystemConfiguration corev1.ObjectReference `json:"systemConfiguration"` - - // Allocations is a list of allocation requests that the Port Manager will attempt - // to satisfy. To request port resources from the port manager, clients should add - // an entry to the allocations. Entries must be unique. The port manager controller - // will attempt to allocate port resources for each allocation specification in the - // list. To remove an allocation and free up port resources, remove the allocation - // from the list. - Allocations []NnfPortManagerAllocationSpec `json:"allocations"` -} - -// AllocationStatus is the current status of a port requestor. A port that is in use by the respective owner -// will have a status of "InUse". A port that is freed by the owner but not yet reclaimed by the port manager -// will have a status of "Free". Any other status value indicates a failure of the port allocation. 
-// +kubebuilder:validation:Enum:=InUse;Free;Cooldown;InvalidConfiguration;InsufficientResources -type NnfPortManagerAllocationStatusStatus string - -const ( - NnfPortManagerAllocationStatusInUse NnfPortManagerAllocationStatusStatus = "InUse" - NnfPortManagerAllocationStatusFree NnfPortManagerAllocationStatusStatus = "Free" - NnfPortManagerAllocationStatusCooldown NnfPortManagerAllocationStatusStatus = "Cooldown" - NnfPortManagerAllocationStatusInvalidConfiguration NnfPortManagerAllocationStatusStatus = "InvalidConfiguration" - NnfPortManagerAllocationStatusInsufficientResources NnfPortManagerAllocationStatusStatus = "InsufficientResources" - // NOTE: You must ensure any new value is added to the above kubebuilder validation enum -) - -// NnfPortManagerAllocationStatus defines the allocation status of a port for a given requester. -type NnfPortManagerAllocationStatus struct { - // Requester is an object reference to the requester of the port resource, if one exists, or - // empty otherwise. - Requester *corev1.ObjectReference `json:"requester,omitempty"` - - // Ports is list of ports allocated to the owning resource. - Ports []uint16 `json:"ports,omitempty"` - - // Status is the ownership status of the port. - Status NnfPortManagerAllocationStatusStatus `json:"status"` - - // TimeUnallocated is when the port was unallocated. This is to ensure the proper cooldown - // duration. - TimeUnallocated *metav1.Time `json:"timeUnallocated,omitempty"` -} - -// PortManagerStatus is the current status of the port manager. 
-// +kubebuilder:validation:Enum:=Ready;SystemConfigurationNotFound -type NnfPortManagerStatusStatus string - -const ( - NnfPortManagerStatusReady NnfPortManagerStatusStatus = "Ready" - NnfPortManagerStatusSystemConfigurationNotFound NnfPortManagerStatusStatus = "SystemConfigurationNotFound" - // NOTE: You must ensure any new value is added in the above kubebuilder validation enum -) - -// NnfPortManagerStatus defines the observed state of NnfPortManager -type NnfPortManagerStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file - - // Allocations is a list of port allocation status'. - Allocations []NnfPortManagerAllocationStatus `json:"allocations,omitempty"` - - // Status is the current status of the port manager. - Status NnfPortManagerStatusStatus `json:"status"` -} - -//+kubebuilder:object:root=true -//+kubebuilder:unservedversion -//+kubebuilder:subresource:status - -// NnfPortManager is the Schema for the nnfportmanagers API -type NnfPortManager struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec NnfPortManagerSpec `json:"spec,omitempty"` - Status NnfPortManagerStatus `json:"status,omitempty"` -} - -func (mgr *NnfPortManager) GetStatus() updater.Status[*NnfPortManagerStatus] { - return &mgr.Status -} - -//+kubebuilder:object:root=true - -// NnfPortManagerList contains a list of NnfPortManager -type NnfPortManagerList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []NnfPortManager `json:"items"` -} - -func init() { - SchemeBuilder.Register(&NnfPortManager{}, &NnfPortManagerList{}) -} diff --git a/api/v1alpha1/nnfstorage_types.go b/api/v1alpha1/nnfstorage_types.go deleted file mode 100644 index 93d9f512..00000000 --- a/api/v1alpha1/nnfstorage_types.go +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Copyright 2021-2023 Hewlett Packard Enterprise Development LP - * 
Other additional copyright holders may be indicated within. - * - * The entirety of this work is licensed under the Apache License, - * Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. - * - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package v1alpha1 - -import ( - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - "github.com/DataWorkflowServices/dws/utils/updater" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -const ( - AllocationSetLabel = "nnf.cray.hpe.com/allocationset" -) - -// NnfStorageAllocationNodes identifies the node and properties of the allocation to make on that node -type NnfStorageAllocationNodes struct { - // Name of the node to make the allocation on - Name string `json:"name"` - - // Number of allocations to make on this node - Count int `json:"count"` -} - -// NnfStorageLustreSpec defines the specifications for a Lustre filesystem -type NnfStorageLustreSpec struct { - // TargetType is the type of Lustre target to be created. - // +kubebuilder:validation:Enum=mgt;mdt;mgtmdt;ost - TargetType string `json:"targetType,omitempty"` - - // BackFs is the type of backing filesystem to use. 
- // +kubebuilder:validation:Enum=ldiskfs;zfs - BackFs string `json:"backFs,omitempty"` - - // MgsAddress is the NID of the MGS when a pre-existing MGS is - // provided in the NnfStorageProfile - MgsAddress string `json:"mgsAddress,omitempty"` - - // PersistentMgsReference is a reference to a persistent storage that is providing - // the external MGS. - PersistentMgsReference corev1.ObjectReference `json:"persistentMgsReference,omitempty"` -} - -// NnfStorageAllocationSetSpec defines the details for an allocation set -type NnfStorageAllocationSetSpec struct { - // Name is a human readable label for this set of allocations (e.g., xfs) - Name string `json:"name"` - - // Capacity defines the capacity, in bytes, of this storage specification. The NNF Node itself - // may split the storage among the available drives operating in the NNF Node. - Capacity int64 `json:"capacity"` - - // Lustre specific configuration - NnfStorageLustreSpec `json:",inline"` - - // SharedAllocation shares a single block storage allocation between multiple file system allocations - // (within the same workflow) on a Rabbit - SharedAllocation bool `json:"sharedAllocation"` - - // Nodes is the list of Rabbit nodes to make allocations on - Nodes []NnfStorageAllocationNodes `json:"nodes"` -} - -// NnfStorageSpec defines the specification for requesting generic storage on a set -// of available NNF Nodes. This object is related to a #DW for NNF Storage, with the WLM -// making the determination for which NNF Nodes it wants to utilize. -type NnfStorageSpec struct { - - // FileSystemType defines the type of the desired filesystem, or raw - // block device. 
- // +kubebuilder:validation:Enum=raw;lvm;zfs;xfs;gfs2;lustre - // +kubebuilder:default:=raw - FileSystemType string `json:"fileSystemType,omitempty"` - - // User ID for file system - UserID uint32 `json:"userID"` - - // Group ID for file system - GroupID uint32 `json:"groupID"` - - // AllocationSets is a list of different types of storage allocations to make. Each - // AllocationSet describes an entire allocation spanning multiple Rabbits. For example, - // an AllocationSet could be all of the OSTs in a Lustre filesystem, or all of the raw - // block devices in a raw block configuration. - AllocationSets []NnfStorageAllocationSetSpec `json:"allocationSets"` -} - -// NnfStorageAllocationSetStatus contains the status information for an allocation set -type NnfStorageAllocationSetStatus struct { - Ready bool `json:"ready,omitempty"` - - // AllocationCount is the total number of allocations that currently - // exist - AllocationCount int `json:"allocationCount"` -} - -type NnfStorageLustreStatus struct { - // MgsAddress is the NID of the MGS. - MgsAddress string `json:"mgsAddress,omitempty"` - - // FileSystemName is the fsname parameter for the Lustre filesystem. - // +kubebuilder:validation:MaxLength:=8 - FileSystemName string `json:"fileSystemName,omitempty"` - - // LustgreMgtReference is an object reference to the NnfLustreMGT resource used - // by the NnfStorage - LustreMgtReference corev1.ObjectReference `json:"lustreMgtReference,omitempty"` -} - -// NnfStorageStatus defines the observed status of NNF Storage. -type NnfStorageStatus struct { - NnfStorageLustreStatus `json:",inline"` - - // AllocationsSets holds the status information for each of the AllocationSets - // from the spec. 
- AllocationSets []NnfStorageAllocationSetStatus `json:"allocationSets,omitempty"` - - dwsv1alpha2.ResourceError `json:",inline"` - - // Ready reflects the status of this NNF Storage - Ready bool `json:"ready,omitempty"` -} - -//+kubebuilder:object:root=true -//+kubebuilder:unservedversion -//+kubebuilder:subresource:status -//+kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.ready" -//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" -//+kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" - -// NnfStorage is the Schema for the storages API -type NnfStorage struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec NnfStorageSpec `json:"spec,omitempty"` - Status NnfStorageStatus `json:"status,omitempty"` -} - -func (s *NnfStorage) GetStatus() updater.Status[*NnfStorageStatus] { - return &s.Status -} - -//+kubebuilder:object:root=true - -// NnfStorageList contains a list of Storage -type NnfStorageList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []NnfStorage `json:"items"` -} - -func (n *NnfStorageList) GetObjectList() []client.Object { - objectList := []client.Object{} - - for i := range n.Items { - objectList = append(objectList, &n.Items[i]) - } - - return objectList -} - -func init() { - SchemeBuilder.Register(&NnfStorage{}, &NnfStorageList{}) -} diff --git a/api/v1alpha1/nnfstorageprofile_types.go b/api/v1alpha1/nnfstorageprofile_types.go deleted file mode 100644 index fb5ab091..00000000 --- a/api/v1alpha1/nnfstorageprofile_types.go +++ /dev/null @@ -1,310 +0,0 @@ -/* - * Copyright 2022 Hewlett Packard Enterprise Development LP - * Other additional copyright holders may be indicated within. - * - * The entirety of this work is licensed under the Apache License, - * Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
- * - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// NnfStorageProfileLustreCmdLines defines commandlines to use for mkfs, zpool, and other utilities -// for Lustre allocations. -type NnfStorageProfileLustreCmdLines struct { - // ZpoolCreate specifies the zpool create commandline, minus the "zpool create". - // This is where you may specify zpool create options, and the virtual device (vdev) such as - // "mirror", or "draid". See zpoolconcepts(7). - ZpoolCreate string `json:"zpoolCreate,omitempty"` - - // Mkfs specifies the mkfs.lustre commandline, minus the "mkfs.lustre". - // Use the --mkfsoptions argument to specify the zfs create options. See zfsprops(7). - // Use the --mountfsoptions argument to specify persistent mount options for the lustre targets. - Mkfs string `json:"mkfs,omitempty"` - - // MountTarget specifies the mount command line for the lustre target. - // For persistent mount options for lustre targets, do not use this array; use the --mountfsoptions - // argument to mkfs.lustre instead. - MountTarget string `json:"mountTarget,omitempty"` -} - -// NnfStorageProfileLustreMiscOptions defines options to use for the mount library, and other utilities. 
-type NnfStorageProfileLustreMiscOptions struct { - // ColocateComputes indicates that the Lustre target should be placed on a Rabbit node that has a physical connection - // to the compute nodes in a workflow - // +kubebuilder:default:=false - ColocateComputes bool `json:"colocateComputes"` - - // Count specifies how many Lustre targets to create - // +kubebuilder:validation:Minimum:=1 - Count int `json:"count,omitempty"` - - // Scale provides a unitless value to determine how many Lustre targets to create - // +kubebuilder:validation:Minimum:=1 - // +kubebuilder:validation:Maximum:=10 - Scale int `json:"scale,omitempty"` - - // Storagelabels defines a list of labels that are added to the DirectiveBreakdown - // labels constraint. This restricts allocations to Storage resources with these labels - StorageLabels []string `json:"storageLabels,omitempty"` -} - -// NnfStorageProfileLustreData defines the Lustre-specific configuration -type NnfStorageProfileLustreData struct { - // CombinedMGTMDT indicates whether the MGT and MDT should be created on the same target device - // +kubebuilder:default:=false - CombinedMGTMDT bool `json:"combinedMgtMdt,omitempty"` - - // ExternalMGS specifies the use of an existing MGS rather than creating one. This can - // be either the NID(s) of a pre-existing MGS that should be used, or it can be an NNF Persistent - // Instance that was created with the "StandaloneMGTPoolName" option. In the latter case, the format - // is "pool:poolName" where "poolName" is the argument from "StandaloneMGTPoolName". A single MGS will - // be picked from the pool. - ExternalMGS string `json:"externalMgs,omitempty"` - - // CapacityMGT specifies the size of the MGT device. - // +kubebuilder:validation:Pattern:="^\\d+(KiB|KB|MiB|MB|GiB|GB|TiB|TB)$" - // +kubebuilder:default:="5GiB" - CapacityMGT string `json:"capacityMgt,omitempty"` - - // CapacityMDT specifies the size of the MDT device. This is also - // used for a combined MGT+MDT device. 
- // +kubebuilder:validation:Pattern:="^\\d+(KiB|KB|MiB|MB|GiB|GB|TiB|TB)$" - // +kubebuilder:default:="5GiB" - CapacityMDT string `json:"capacityMdt,omitempty"` - - // ExclusiveMDT indicates that the MDT should not be colocated with any other target on the chosen server. - // +kubebuilder:default:=false - ExclusiveMDT bool `json:"exclusiveMdt,omitempty"` - - // CapacityScalingFactor is a scaling factor for the OST capacity requested in the DirectiveBreakdown - // +kubebuilder:default:="1.0" - CapacityScalingFactor string `json:"capacityScalingFactor,omitempty"` - - // StandaloneMGTPoolName creates a Lustre MGT without a MDT or OST. This option can only be used when creating - // a persistent Lustre instance. The MGS is placed into a named pool that can be used by the "ExternalMGS" option. - // Multiple pools can be created. - StandaloneMGTPoolName string `json:"standaloneMgtPoolName,omitempty"` - - // MgtCmdLines contains commands to create an MGT target. - MgtCmdLines NnfStorageProfileLustreCmdLines `json:"mgtCommandlines,omitempty"` - - // MdtCmdLines contains commands to create an MDT target. - MdtCmdLines NnfStorageProfileLustreCmdLines `json:"mdtCommandlines,omitempty"` - - // MgtMdtCmdLines contains commands to create a combined MGT/MDT target. - MgtMdtCmdLines NnfStorageProfileLustreCmdLines `json:"mgtMdtCommandlines,omitempty"` - - // OstCmdLines contains commands to create an OST target. - OstCmdLines NnfStorageProfileLustreCmdLines `json:"ostCommandlines,omitempty"` - - // MgtOptions contains options to use for libraries used for an MGT target. - MgtOptions NnfStorageProfileLustreMiscOptions `json:"mgtOptions,omitempty"` - - // MdtOptions contains options to use for libraries used for an MDT target. - MdtOptions NnfStorageProfileLustreMiscOptions `json:"mdtOptions,omitempty"` - - // MgtMdtOptions contains options to use for libraries used for a combined MGT/MDT target. 
- MgtMdtOptions NnfStorageProfileLustreMiscOptions `json:"mgtMdtOptions,omitempty"` - - // OstOptions contains options to use for libraries used for an OST target. - OstOptions NnfStorageProfileLustreMiscOptions `json:"ostOptions,omitempty"` - - // MountRabbit specifies mount options for making the Lustre client mount on the Rabbit. - MountRabbit string `json:"mountRabbit,omitempty"` - - // MountCompute specifies mount options for making the Lustre client mount on the Compute. - MountCompute string `json:"mountCompute,omitempty"` -} - -// NnfStorageProfileCmdLines defines commandlines to use for mkfs, and other utilities for storage -// allocations that use LVM and a simple file system type (e.g., gfs2) -type NnfStorageProfileCmdLines struct { - // Mkfs specifies the mkfs commandline, minus the "mkfs". - Mkfs string `json:"mkfs,omitempty"` - - // SharedVg specifies that allocations from a workflow on the same Rabbit should share an - // LVM VolumeGroup - // +kubebuilder:default:=false - SharedVg bool `json:"sharedVg,omitempty"` - - // PvCreate specifies the pvcreate commandline, minus the "pvcreate". - PvCreate string `json:"pvCreate,omitempty"` - - // PvRemove specifies the pvremove commandline, minus the "pvremove". - PvRemove string `json:"pvRemove,omitempty"` - - // VgCreate specifies the vgcreate commandline, minus the "vgcreate". - VgCreate string `json:"vgCreate,omitempty"` - - // VgChange specifies the various vgchange commandlines, minus the "vgchange" - VgChange NnfStorageProfileLVMVgChangeCmdLines `json:"vgChange,omitempty"` - - // VgCreate specifies the vgcreate commandline, minus the "vgremove". - VgRemove string `json:"vgRemove,omitempty"` - - // LvCreate specifies the lvcreate commandline, minus the "lvcreate". 
- LvCreate string `json:"lvCreate,omitempty"` - - // LvChange specifies the various lvchange commandlines, minus the "lvchange" - LvChange NnfStorageProfileLVMLvChangeCmdLines `json:"lvChange,omitempty"` - - // LvRemove specifies the lvcreate commandline, minus the "lvremove". - LvRemove string `json:"lvRemove,omitempty"` - - // MountRabbit specifies mount options for mounting on the Rabbit. - MountRabbit string `json:"mountRabbit,omitempty"` - - // MountCompute specifies mount options for mounting on the Compute. - MountCompute string `json:"mountCompute,omitempty"` -} - -// NnfStorageProfileLVMVgChangeCmdLines -type NnfStorageProfileLVMVgChangeCmdLines struct { - // The vgchange commandline for lockStart, minus the "vgchange" command - LockStart string `json:"lockStart,omitempty"` - - // The vgchange commandline for lockStop, minus the "vgchange" command - LockStop string `json:"lockStop,omitempty"` -} - -// NnfStorageProfileLVMVgChangeCmdLines -type NnfStorageProfileLVMLvChangeCmdLines struct { - // The lvchange commandline for activate, minus the "lvchange" command - Activate string `json:"activate,omitempty"` - - // The lvchange commandline for deactivate, minus the "lvchange" command - Deactivate string `json:"deactivate,omitempty"` -} - -// NnfStorageProfileGFS2Data defines the GFS2-specific configuration -type NnfStorageProfileGFS2Data struct { - // CmdLines contains commands to create volumes and filesystems. - CmdLines NnfStorageProfileCmdLines `json:"commandlines,omitempty"` - - // Storagelabels defines a list of labels that are added to the DirectiveBreakdown - // labels constraint. 
This restricts allocations to Storage resources with these labels - StorageLabels []string `json:"storageLabels,omitempty"` - - // CapacityScalingFactor is a scaling factor for the capacity requested in the DirectiveBreakdown - // +kubebuilder:default:="1.0" - CapacityScalingFactor string `json:"capacityScalingFactor,omitempty"` -} - -// NnfStorageProfileXFSData defines the XFS-specific configuration -type NnfStorageProfileXFSData struct { - // CmdLines contains commands to create volumes and filesystems. - CmdLines NnfStorageProfileCmdLines `json:"commandlines,omitempty"` - - // Storagelabels defines a list of labels that are added to the DirectiveBreakdown - // labels constraint. This restricts allocations to Storage resources with these labels - StorageLabels []string `json:"storageLabels,omitempty"` - - // CapacityScalingFactor is a scaling factor for the capacity requested in the DirectiveBreakdown - // +kubebuilder:default:="1.0" - CapacityScalingFactor string `json:"capacityScalingFactor,omitempty"` -} - -// NnfStorageProfileRawData defines the Raw-specific configuration -type NnfStorageProfileRawData struct { - // CmdLines contains commands to create volumes and filesystems. - CmdLines NnfStorageProfileCmdLines `json:"commandlines,omitempty"` - - // Storagelabels defines a list of labels that are added to the DirectiveBreakdown - // labels constraint. 
This restricts allocations to Storage resources with these labels - StorageLabels []string `json:"storageLabels,omitempty"` - - // CapacityScalingFactor is a scaling factor for the capacity requested in the DirectiveBreakdown - // +kubebuilder:default:="1.0" - CapacityScalingFactor string `json:"capacityScalingFactor,omitempty"` -} - -// NnfStorageProfileData defines the desired state of NnfStorageProfile -type NnfStorageProfileData struct { - - // Default is true if this instance is the default resource to use - // +kubebuilder:default:=false - Default bool `json:"default,omitempty"` - - // Pinned is true if this instance is an immutable copy - // +kubebuilder:default:=false - Pinned bool `json:"pinned,omitempty"` - - // LustreStorage defines the Lustre-specific configuration - LustreStorage NnfStorageProfileLustreData `json:"lustreStorage"` - - // GFS2Storage defines the GFS2-specific configuration - GFS2Storage NnfStorageProfileGFS2Data `json:"gfs2Storage"` - - // XFSStorage defines the XFS-specific configuration - XFSStorage NnfStorageProfileXFSData `json:"xfsStorage"` - - // RawStorage defines the Raw-specific configuration - RawStorage NnfStorageProfileRawData `json:"rawStorage"` -} - -//+kubebuilder:object:root=true -//+kubebuilder:unservedversion -//+kubebuilder:printcolumn:name="DEFAULT",type="boolean",JSONPath=".data.default",description="True if this is the default instance" -//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" - -// NnfStorageProfile is the Schema for the nnfstorageprofiles API -type NnfStorageProfile struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Data NnfStorageProfileData `json:"data,omitempty"` -} - -//+kubebuilder:object:root=true - -// NnfStorageProfileList contains a list of NnfStorageProfile -type NnfStorageProfileList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []NnfStorageProfile 
`json:"items"` -} - -func (n *NnfStorageProfile) GetLustreMiscOptions(target string) NnfStorageProfileLustreMiscOptions { - switch target { - case "mgt": - return n.Data.LustreStorage.MgtOptions - case "mdt": - return n.Data.LustreStorage.MdtOptions - case "mgtmdt": - return n.Data.LustreStorage.MgtMdtOptions - case "ost": - return n.Data.LustreStorage.OstOptions - default: - panic("Invalid target type") - } -} - -func (n *NnfStorageProfileList) GetObjectList() []client.Object { - objectList := []client.Object{} - - for i := range n.Items { - objectList = append(objectList, &n.Items[i]) - } - - return objectList -} - -func init() { - SchemeBuilder.Register(&NnfStorageProfile{}, &NnfStorageProfileList{}) -} diff --git a/api/v1alpha1/nnfsystemstorage_types.go b/api/v1alpha1/nnfsystemstorage_types.go deleted file mode 100644 index 2f2455b2..00000000 --- a/api/v1alpha1/nnfsystemstorage_types.go +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright 2024 Hewlett Packard Enterprise Development LP - * Other additional copyright holders may be indicated within. - * - * The entirety of this work is licensed under the Apache License, - * Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. - * - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package v1alpha1 - -import ( - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - "github.com/DataWorkflowServices/dws/utils/updater" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -type NnfSystemStorageComputesTarget string - -const ( - ComputesTargetAll NnfSystemStorageComputesTarget = "all" - ComputesTargetEven NnfSystemStorageComputesTarget = "even" - ComputesTargetOdd NnfSystemStorageComputesTarget = "odd" - ComputesTargetPattern NnfSystemStorageComputesTarget = "pattern" -) - -// NnfSystemStorageSpec defines the desired state of NnfSystemStorage -type NnfSystemStorageSpec struct { - // SystemConfiguration is an object reference to the SystemConfiguration resource to use. If this - // field is empty, name: default namespace: default is used. - SystemConfiguration corev1.ObjectReference `json:"systemConfiguration,omitempty"` - - // ExludeRabbits is a list of Rabbits to exclude from the Rabbits in the SystemConfiguration - ExcludeRabbits []string `json:"excludeRabbits,omitempty"` - - // IncludeRabbits is a list of Rabbits to use rather than getting the list of Rabbits from the - // SystemConfiguration - IncludeRabbits []string `json:"includeRabbits,omitempty"` - - // ExcludeComputes is a list of compute nodes to exclude from the the compute nodes listed in the - // SystemConfiguration - ExcludeComputes []string `json:"excludeComputes,omitempty"` - - // IncludeComputes is a list of computes nodes to use rather than getting the list of compute nodes - // from the SystemConfiguration - IncludeComputes []string `json:"includeComputes,omitempty"` - - // ComputesTarget specifies which computes to make the storage accessible to - // +kubebuilder:validation:Enum=all;even;odd;pattern - // +kubebuilder:default:=all - ComputesTarget NnfSystemStorageComputesTarget `json:"computesTarget,omitempty"` - - // ComputesPattern is a list of compute node indexes (0-15) to make the 
storage accessible to. This - // is only used if ComputesTarget is "pattern" - // +kubebuilder:validation:MaxItems=16 - // +kubebuilder:validation:items:Maximum=15 - // +kubebuilder:validation:items:Minimum=0 - ComputesPattern []int `json:"computesPattern,omitempty"` - - // Capacity is the allocation size on each Rabbit - // +kubebuilder:default:=1073741824 - Capacity int64 `json:"capacity"` - - // Type is the file system type to use for the storage allocation - // +kubebuilder:validation:Enum=raw;xfs;gfs2 - // +kubebuilder:default:=raw - Type string `json:"type,omitempty"` - - // StorageProfile is an object reference to the storage profile to use - StorageProfile corev1.ObjectReference `json:"storageProfile"` - - // MakeClientMounts specifies whether to make ClientMount resources or just - // make the devices available to the client - // +kubebuilder:default:=false - MakeClientMounts bool `json:"makeClientMounts"` - - // ClientMountPath is an optional path for where to mount the file system on the computes - ClientMountPath string `json:"clientMountPath,omitempty"` -} - -// NnfSystemStorageStatus defines the observed state of NnfSystemStorage -type NnfSystemStorageStatus struct { - // Ready signifies whether all work has been completed - Ready bool `json:"ready"` - - dwsv1alpha2.ResourceError `json:",inline"` -} - -// +kubebuilder:object:root=true -// +kubebuilder:unservedversion -// +kubebuilder:subresource:status -// NnfSystemStorage is the Schema for the nnfsystemstorages API -type NnfSystemStorage struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec NnfSystemStorageSpec `json:"spec,omitempty"` - Status NnfSystemStorageStatus `json:"status,omitempty"` -} - -func (a *NnfSystemStorage) GetStatus() updater.Status[*NnfSystemStorageStatus] { - return &a.Status -} - -// +kubebuilder:object:root=true -// NnfSystemStorageList contains a list of NnfSystemStorage -type NnfSystemStorageList struct { - metav1.TypeMeta 
`json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []NnfSystemStorage `json:"items"` -} - -func (n *NnfSystemStorageList) GetObjectList() []client.Object { - objectList := []client.Object{} - - for i := range n.Items { - objectList = append(objectList, &n.Items[i]) - } - - return objectList -} - -func init() { - SchemeBuilder.Register(&NnfSystemStorage{}, &NnfSystemStorageList{}) -} diff --git a/api/v1alpha1/workflow_helpers.go b/api/v1alpha1/workflow_helpers.go deleted file mode 100644 index 8da83595..00000000 --- a/api/v1alpha1/workflow_helpers.go +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright 2022-2024 Hewlett Packard Enterprise Development LP - * Other additional copyright holders may be indicated within. - * - * The entirety of this work is licensed under the Apache License, - * Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. - * - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package v1alpha1 - -const ( - // DirectiveIndexLabel is a label applied to child objects of the workflow - // to show which directive they were created for. This is useful during deletion - // to filter the child objects by the directive index and only delete the - // resources for the directive being processed - DirectiveIndexLabel = "nnf.cray.hpe.com/directive_index" - - // TargetDirectiveIndexLabel is used for ClientMount resources to indicate the - // directive index of the storage they're targeting. 
- TargetDirectiveIndexLabel = "nnf.cray.hpe.com/target_directive_index" - - // TargetOwnerUidLabel is used for ClientMount resources to indicate the UID of the - // parent NnfStorage it's targeting - TargetOwnerUidLabel = "nnf.cray.hpe.com/target_owner_uid" - - // PinnedStorageProfileLabelName is a label applied to NnfStorage objects to show - // which pinned storage profile is being used. - PinnedStorageProfileLabelName = "nnf.cray.hpe.com/pinned_storage_profile_name" - - // PinnedStorageProfileLabelNameSpace is a label applied to NnfStorage objects to show - // which pinned storage profile is being used. - PinnedStorageProfileLabelNameSpace = "nnf.cray.hpe.com/pinned_storage_profile_namespace" - - // PinnedContainerProfileLabelName is a label applied to NnfStorage objects to show - // which pinned container profile is being used. - PinnedContainerProfileLabelName = "nnf.cray.hpe.com/pinned_container_profile_name" - - // PinnedContainerProfileLabelNameSpace is a label applied to NnfStorage objects to show - // which pinned container profile is being used. - PinnedContainerProfileLabelNameSpace = "nnf.cray.hpe.com/pinned_container_profile_namespace" - - // StandaloneMGTLabel is a label applied to the PersistentStorageInstance to show that - // it is for a Lustre MGT only. The value for the label is the pool name. - StandaloneMGTLabel = "nnf.cray.hpe.com/standalone_mgt" - - // RabbitNodeSelectorLabel is a label applied to each k8s Node that is a Rabbit. - // It is used for scheduling NLCs onto the rabbits. - // (This is left in its legacy form because so many existing services are - // using it in their nodeSelector.) - RabbitNodeSelectorLabel = "cray.nnf.node" - - // TaintsAndLabelsCompletedLabel is a label applied to each k8s Node that is a Rabbit. - // It is used to indicate that the node has completed the process of applying - // the taints and labels that mark it as a rabbit. 
- TaintsAndLabelsCompletedLabel = "nnf.cray.hpe.com/taints_and_labels_completed" - - // RabbitNodeTaintKey is a taint key applied to each k8s Node that is a Rabbit. - // It is used for scheduling NLCs onto the rabbits. - // (This is left in its legacy form to avoid having existing clusters, - // which already have this taint, grind to a halt.) - RabbitNodeTaintKey = "cray.nnf.node" -) diff --git a/api/v1alpha1/zz_generated.conversion.go b/api/v1alpha1/zz_generated.conversion.go deleted file mode 100644 index aede0f37..00000000 --- a/api/v1alpha1/zz_generated.conversion.go +++ /dev/null @@ -1,3230 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* - * Copyright 2024 Hewlett Packard Enterprise Development LP - * Other additional copyright holders may be indicated within. - * - * The entirety of this work is licensed under the Apache License, - * Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. - * - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Code generated by conversion-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - unsafe "unsafe" - - v1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" - v1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" - v2beta1 "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*LustreStorageSpec)(nil), (*v1alpha4.LustreStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_LustreStorageSpec_To_v1alpha4_LustreStorageSpec(a.(*LustreStorageSpec), b.(*v1alpha4.LustreStorageSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.LustreStorageSpec)(nil), (*LustreStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_LustreStorageSpec_To_v1alpha1_LustreStorageSpec(a.(*v1alpha4.LustreStorageSpec), b.(*LustreStorageSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfAccess)(nil), (*v1alpha4.NnfAccess)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfAccess_To_v1alpha4_NnfAccess(a.(*NnfAccess), b.(*v1alpha4.NnfAccess), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfAccess)(nil), (*NnfAccess)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfAccess_To_v1alpha1_NnfAccess(a.(*v1alpha4.NnfAccess), b.(*NnfAccess), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfAccessList)(nil), (*v1alpha4.NnfAccessList)(nil), func(a, 
b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfAccessList_To_v1alpha4_NnfAccessList(a.(*NnfAccessList), b.(*v1alpha4.NnfAccessList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfAccessList)(nil), (*NnfAccessList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfAccessList_To_v1alpha1_NnfAccessList(a.(*v1alpha4.NnfAccessList), b.(*NnfAccessList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfAccessSpec)(nil), (*v1alpha4.NnfAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfAccessSpec_To_v1alpha4_NnfAccessSpec(a.(*NnfAccessSpec), b.(*v1alpha4.NnfAccessSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfAccessSpec)(nil), (*NnfAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfAccessSpec_To_v1alpha1_NnfAccessSpec(a.(*v1alpha4.NnfAccessSpec), b.(*NnfAccessSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfAccessStatus)(nil), (*v1alpha4.NnfAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfAccessStatus_To_v1alpha4_NnfAccessStatus(a.(*NnfAccessStatus), b.(*v1alpha4.NnfAccessStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfAccessStatus)(nil), (*NnfAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfAccessStatus_To_v1alpha1_NnfAccessStatus(a.(*v1alpha4.NnfAccessStatus), b.(*NnfAccessStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfContainerProfile)(nil), (*v1alpha4.NnfContainerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha1_NnfContainerProfile_To_v1alpha4_NnfContainerProfile(a.(*NnfContainerProfile), b.(*v1alpha4.NnfContainerProfile), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfContainerProfile)(nil), (*NnfContainerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfContainerProfile_To_v1alpha1_NnfContainerProfile(a.(*v1alpha4.NnfContainerProfile), b.(*NnfContainerProfile), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfContainerProfileData)(nil), (*v1alpha4.NnfContainerProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfContainerProfileData_To_v1alpha4_NnfContainerProfileData(a.(*NnfContainerProfileData), b.(*v1alpha4.NnfContainerProfileData), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfContainerProfileData)(nil), (*NnfContainerProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfContainerProfileData_To_v1alpha1_NnfContainerProfileData(a.(*v1alpha4.NnfContainerProfileData), b.(*NnfContainerProfileData), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfContainerProfileList)(nil), (*v1alpha4.NnfContainerProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfContainerProfileList_To_v1alpha4_NnfContainerProfileList(a.(*NnfContainerProfileList), b.(*v1alpha4.NnfContainerProfileList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfContainerProfileList)(nil), (*NnfContainerProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfContainerProfileList_To_v1alpha1_NnfContainerProfileList(a.(*v1alpha4.NnfContainerProfileList), b.(*NnfContainerProfileList), scope) - }); err != nil { - return err - } - if 
err := s.AddGeneratedConversionFunc((*NnfContainerProfileStorage)(nil), (*v1alpha4.NnfContainerProfileStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfContainerProfileStorage_To_v1alpha4_NnfContainerProfileStorage(a.(*NnfContainerProfileStorage), b.(*v1alpha4.NnfContainerProfileStorage), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfContainerProfileStorage)(nil), (*NnfContainerProfileStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfContainerProfileStorage_To_v1alpha1_NnfContainerProfileStorage(a.(*v1alpha4.NnfContainerProfileStorage), b.(*NnfContainerProfileStorage), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfDataMovement)(nil), (*v1alpha4.NnfDataMovement)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDataMovement_To_v1alpha4_NnfDataMovement(a.(*NnfDataMovement), b.(*v1alpha4.NnfDataMovement), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovement)(nil), (*NnfDataMovement)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfDataMovement_To_v1alpha1_NnfDataMovement(a.(*v1alpha4.NnfDataMovement), b.(*NnfDataMovement), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementCommandStatus)(nil), (*v1alpha4.NnfDataMovementCommandStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDataMovementCommandStatus_To_v1alpha4_NnfDataMovementCommandStatus(a.(*NnfDataMovementCommandStatus), b.(*v1alpha4.NnfDataMovementCommandStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementCommandStatus)(nil), (*NnfDataMovementCommandStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha4_NnfDataMovementCommandStatus_To_v1alpha1_NnfDataMovementCommandStatus(a.(*v1alpha4.NnfDataMovementCommandStatus), b.(*NnfDataMovementCommandStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementConfig)(nil), (*v1alpha4.NnfDataMovementConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDataMovementConfig_To_v1alpha4_NnfDataMovementConfig(a.(*NnfDataMovementConfig), b.(*v1alpha4.NnfDataMovementConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementConfig)(nil), (*NnfDataMovementConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfDataMovementConfig_To_v1alpha1_NnfDataMovementConfig(a.(*v1alpha4.NnfDataMovementConfig), b.(*NnfDataMovementConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementList)(nil), (*v1alpha4.NnfDataMovementList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDataMovementList_To_v1alpha4_NnfDataMovementList(a.(*NnfDataMovementList), b.(*v1alpha4.NnfDataMovementList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementList)(nil), (*NnfDataMovementList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfDataMovementList_To_v1alpha1_NnfDataMovementList(a.(*v1alpha4.NnfDataMovementList), b.(*NnfDataMovementList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementManager)(nil), (*v1alpha4.NnfDataMovementManager)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDataMovementManager_To_v1alpha4_NnfDataMovementManager(a.(*NnfDataMovementManager), b.(*v1alpha4.NnfDataMovementManager), scope) - }); err != nil { - return err - } - if err := 
s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementManager)(nil), (*NnfDataMovementManager)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfDataMovementManager_To_v1alpha1_NnfDataMovementManager(a.(*v1alpha4.NnfDataMovementManager), b.(*NnfDataMovementManager), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementManagerList)(nil), (*v1alpha4.NnfDataMovementManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDataMovementManagerList_To_v1alpha4_NnfDataMovementManagerList(a.(*NnfDataMovementManagerList), b.(*v1alpha4.NnfDataMovementManagerList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementManagerList)(nil), (*NnfDataMovementManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfDataMovementManagerList_To_v1alpha1_NnfDataMovementManagerList(a.(*v1alpha4.NnfDataMovementManagerList), b.(*NnfDataMovementManagerList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementManagerSpec)(nil), (*v1alpha4.NnfDataMovementManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDataMovementManagerSpec_To_v1alpha4_NnfDataMovementManagerSpec(a.(*NnfDataMovementManagerSpec), b.(*v1alpha4.NnfDataMovementManagerSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementManagerSpec)(nil), (*NnfDataMovementManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfDataMovementManagerSpec_To_v1alpha1_NnfDataMovementManagerSpec(a.(*v1alpha4.NnfDataMovementManagerSpec), b.(*NnfDataMovementManagerSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementManagerStatus)(nil), 
(*v1alpha4.NnfDataMovementManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDataMovementManagerStatus_To_v1alpha4_NnfDataMovementManagerStatus(a.(*NnfDataMovementManagerStatus), b.(*v1alpha4.NnfDataMovementManagerStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementManagerStatus)(nil), (*NnfDataMovementManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfDataMovementManagerStatus_To_v1alpha1_NnfDataMovementManagerStatus(a.(*v1alpha4.NnfDataMovementManagerStatus), b.(*NnfDataMovementManagerStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementProfile)(nil), (*v1alpha4.NnfDataMovementProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDataMovementProfile_To_v1alpha4_NnfDataMovementProfile(a.(*NnfDataMovementProfile), b.(*v1alpha4.NnfDataMovementProfile), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementProfile)(nil), (*NnfDataMovementProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfDataMovementProfile_To_v1alpha1_NnfDataMovementProfile(a.(*v1alpha4.NnfDataMovementProfile), b.(*NnfDataMovementProfile), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementProfileData)(nil), (*v1alpha4.NnfDataMovementProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDataMovementProfileData_To_v1alpha4_NnfDataMovementProfileData(a.(*NnfDataMovementProfileData), b.(*v1alpha4.NnfDataMovementProfileData), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementProfileData)(nil), (*NnfDataMovementProfileData)(nil), func(a, b interface{}, scope 
conversion.Scope) error { - return Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha1_NnfDataMovementProfileData(a.(*v1alpha4.NnfDataMovementProfileData), b.(*NnfDataMovementProfileData), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementProfileList)(nil), (*v1alpha4.NnfDataMovementProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDataMovementProfileList_To_v1alpha4_NnfDataMovementProfileList(a.(*NnfDataMovementProfileList), b.(*v1alpha4.NnfDataMovementProfileList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementProfileList)(nil), (*NnfDataMovementProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfDataMovementProfileList_To_v1alpha1_NnfDataMovementProfileList(a.(*v1alpha4.NnfDataMovementProfileList), b.(*NnfDataMovementProfileList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementSpec)(nil), (*v1alpha4.NnfDataMovementSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDataMovementSpec_To_v1alpha4_NnfDataMovementSpec(a.(*NnfDataMovementSpec), b.(*v1alpha4.NnfDataMovementSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementSpec)(nil), (*NnfDataMovementSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfDataMovementSpec_To_v1alpha1_NnfDataMovementSpec(a.(*v1alpha4.NnfDataMovementSpec), b.(*NnfDataMovementSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementSpecSourceDestination)(nil), (*v1alpha4.NnfDataMovementSpecSourceDestination)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha1_NnfDataMovementSpecSourceDestination_To_v1alpha4_NnfDataMovementSpecSourceDestination(a.(*NnfDataMovementSpecSourceDestination), b.(*v1alpha4.NnfDataMovementSpecSourceDestination), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementSpecSourceDestination)(nil), (*NnfDataMovementSpecSourceDestination)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfDataMovementSpecSourceDestination_To_v1alpha1_NnfDataMovementSpecSourceDestination(a.(*v1alpha4.NnfDataMovementSpecSourceDestination), b.(*NnfDataMovementSpecSourceDestination), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfDataMovementStatus)(nil), (*v1alpha4.NnfDataMovementStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDataMovementStatus_To_v1alpha4_NnfDataMovementStatus(a.(*NnfDataMovementStatus), b.(*v1alpha4.NnfDataMovementStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementStatus)(nil), (*NnfDataMovementStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfDataMovementStatus_To_v1alpha1_NnfDataMovementStatus(a.(*v1alpha4.NnfDataMovementStatus), b.(*NnfDataMovementStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfDriveStatus)(nil), (*v1alpha4.NnfDriveStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfDriveStatus_To_v1alpha4_NnfDriveStatus(a.(*NnfDriveStatus), b.(*v1alpha4.NnfDriveStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDriveStatus)(nil), (*NnfDriveStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfDriveStatus_To_v1alpha1_NnfDriveStatus(a.(*v1alpha4.NnfDriveStatus), b.(*NnfDriveStatus), scope) - 
}); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfLustreMGT)(nil), (*v1alpha4.NnfLustreMGT)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfLustreMGT_To_v1alpha4_NnfLustreMGT(a.(*NnfLustreMGT), b.(*v1alpha4.NnfLustreMGT), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfLustreMGT)(nil), (*NnfLustreMGT)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfLustreMGT_To_v1alpha1_NnfLustreMGT(a.(*v1alpha4.NnfLustreMGT), b.(*NnfLustreMGT), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfLustreMGTList)(nil), (*v1alpha4.NnfLustreMGTList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfLustreMGTList_To_v1alpha4_NnfLustreMGTList(a.(*NnfLustreMGTList), b.(*v1alpha4.NnfLustreMGTList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfLustreMGTList)(nil), (*NnfLustreMGTList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfLustreMGTList_To_v1alpha1_NnfLustreMGTList(a.(*v1alpha4.NnfLustreMGTList), b.(*NnfLustreMGTList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfLustreMGTSpec)(nil), (*v1alpha4.NnfLustreMGTSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfLustreMGTSpec_To_v1alpha4_NnfLustreMGTSpec(a.(*NnfLustreMGTSpec), b.(*v1alpha4.NnfLustreMGTSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfLustreMGTSpec)(nil), (*NnfLustreMGTSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfLustreMGTSpec_To_v1alpha1_NnfLustreMGTSpec(a.(*v1alpha4.NnfLustreMGTSpec), b.(*NnfLustreMGTSpec), scope) - }); err != nil { - return err - } - if err := 
s.AddGeneratedConversionFunc((*NnfLustreMGTStatus)(nil), (*v1alpha4.NnfLustreMGTStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfLustreMGTStatus_To_v1alpha4_NnfLustreMGTStatus(a.(*NnfLustreMGTStatus), b.(*v1alpha4.NnfLustreMGTStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfLustreMGTStatus)(nil), (*NnfLustreMGTStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfLustreMGTStatus_To_v1alpha1_NnfLustreMGTStatus(a.(*v1alpha4.NnfLustreMGTStatus), b.(*NnfLustreMGTStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfLustreMGTStatusClaim)(nil), (*v1alpha4.NnfLustreMGTStatusClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfLustreMGTStatusClaim_To_v1alpha4_NnfLustreMGTStatusClaim(a.(*NnfLustreMGTStatusClaim), b.(*v1alpha4.NnfLustreMGTStatusClaim), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfLustreMGTStatusClaim)(nil), (*NnfLustreMGTStatusClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfLustreMGTStatusClaim_To_v1alpha1_NnfLustreMGTStatusClaim(a.(*v1alpha4.NnfLustreMGTStatusClaim), b.(*NnfLustreMGTStatusClaim), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfNode)(nil), (*v1alpha4.NnfNode)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNode_To_v1alpha4_NnfNode(a.(*NnfNode), b.(*v1alpha4.NnfNode), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNode)(nil), (*NnfNode)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfNode_To_v1alpha1_NnfNode(a.(*v1alpha4.NnfNode), b.(*NnfNode), scope) - }); err != nil { - return err - } - if err := 
s.AddGeneratedConversionFunc((*NnfNodeBlockStorage)(nil), (*v1alpha4.NnfNodeBlockStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeBlockStorage_To_v1alpha4_NnfNodeBlockStorage(a.(*NnfNodeBlockStorage), b.(*v1alpha4.NnfNodeBlockStorage), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorage)(nil), (*NnfNodeBlockStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfNodeBlockStorage_To_v1alpha1_NnfNodeBlockStorage(a.(*v1alpha4.NnfNodeBlockStorage), b.(*NnfNodeBlockStorage), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageAccessStatus)(nil), (*v1alpha4.NnfNodeBlockStorageAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeBlockStorageAccessStatus_To_v1alpha4_NnfNodeBlockStorageAccessStatus(a.(*NnfNodeBlockStorageAccessStatus), b.(*v1alpha4.NnfNodeBlockStorageAccessStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageAccessStatus)(nil), (*NnfNodeBlockStorageAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfNodeBlockStorageAccessStatus_To_v1alpha1_NnfNodeBlockStorageAccessStatus(a.(*v1alpha4.NnfNodeBlockStorageAccessStatus), b.(*NnfNodeBlockStorageAccessStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageAllocationSpec)(nil), (*v1alpha4.NnfNodeBlockStorageAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeBlockStorageAllocationSpec_To_v1alpha4_NnfNodeBlockStorageAllocationSpec(a.(*NnfNodeBlockStorageAllocationSpec), b.(*v1alpha4.NnfNodeBlockStorageAllocationSpec), scope) - }); err != nil { - return err - } - if err := 
s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageAllocationSpec)(nil), (*NnfNodeBlockStorageAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfNodeBlockStorageAllocationSpec_To_v1alpha1_NnfNodeBlockStorageAllocationSpec(a.(*v1alpha4.NnfNodeBlockStorageAllocationSpec), b.(*NnfNodeBlockStorageAllocationSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageAllocationStatus)(nil), (*v1alpha4.NnfNodeBlockStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeBlockStorageAllocationStatus_To_v1alpha4_NnfNodeBlockStorageAllocationStatus(a.(*NnfNodeBlockStorageAllocationStatus), b.(*v1alpha4.NnfNodeBlockStorageAllocationStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageAllocationStatus)(nil), (*NnfNodeBlockStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfNodeBlockStorageAllocationStatus_To_v1alpha1_NnfNodeBlockStorageAllocationStatus(a.(*v1alpha4.NnfNodeBlockStorageAllocationStatus), b.(*NnfNodeBlockStorageAllocationStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageDeviceStatus)(nil), (*v1alpha4.NnfNodeBlockStorageDeviceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeBlockStorageDeviceStatus_To_v1alpha4_NnfNodeBlockStorageDeviceStatus(a.(*NnfNodeBlockStorageDeviceStatus), b.(*v1alpha4.NnfNodeBlockStorageDeviceStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageDeviceStatus)(nil), (*NnfNodeBlockStorageDeviceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha4_NnfNodeBlockStorageDeviceStatus_To_v1alpha1_NnfNodeBlockStorageDeviceStatus(a.(*v1alpha4.NnfNodeBlockStorageDeviceStatus), b.(*NnfNodeBlockStorageDeviceStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageList)(nil), (*v1alpha4.NnfNodeBlockStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeBlockStorageList_To_v1alpha4_NnfNodeBlockStorageList(a.(*NnfNodeBlockStorageList), b.(*v1alpha4.NnfNodeBlockStorageList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageList)(nil), (*NnfNodeBlockStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfNodeBlockStorageList_To_v1alpha1_NnfNodeBlockStorageList(a.(*v1alpha4.NnfNodeBlockStorageList), b.(*NnfNodeBlockStorageList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageSpec)(nil), (*v1alpha4.NnfNodeBlockStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeBlockStorageSpec_To_v1alpha4_NnfNodeBlockStorageSpec(a.(*NnfNodeBlockStorageSpec), b.(*v1alpha4.NnfNodeBlockStorageSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageSpec)(nil), (*NnfNodeBlockStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfNodeBlockStorageSpec_To_v1alpha1_NnfNodeBlockStorageSpec(a.(*v1alpha4.NnfNodeBlockStorageSpec), b.(*NnfNodeBlockStorageSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfNodeBlockStorageStatus)(nil), (*v1alpha4.NnfNodeBlockStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeBlockStorageStatus_To_v1alpha4_NnfNodeBlockStorageStatus(a.(*NnfNodeBlockStorageStatus), 
b.(*v1alpha4.NnfNodeBlockStorageStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeBlockStorageStatus)(nil), (*NnfNodeBlockStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfNodeBlockStorageStatus_To_v1alpha1_NnfNodeBlockStorageStatus(a.(*v1alpha4.NnfNodeBlockStorageStatus), b.(*NnfNodeBlockStorageStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfNodeECData)(nil), (*v1alpha4.NnfNodeECData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeECData_To_v1alpha4_NnfNodeECData(a.(*NnfNodeECData), b.(*v1alpha4.NnfNodeECData), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeECData)(nil), (*NnfNodeECData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfNodeECData_To_v1alpha1_NnfNodeECData(a.(*v1alpha4.NnfNodeECData), b.(*NnfNodeECData), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfNodeECDataList)(nil), (*v1alpha4.NnfNodeECDataList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeECDataList_To_v1alpha4_NnfNodeECDataList(a.(*NnfNodeECDataList), b.(*v1alpha4.NnfNodeECDataList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeECDataList)(nil), (*NnfNodeECDataList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfNodeECDataList_To_v1alpha1_NnfNodeECDataList(a.(*v1alpha4.NnfNodeECDataList), b.(*NnfNodeECDataList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfNodeECDataSpec)(nil), (*v1alpha4.NnfNodeECDataSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha1_NnfNodeECDataSpec_To_v1alpha4_NnfNodeECDataSpec(a.(*NnfNodeECDataSpec), b.(*v1alpha4.NnfNodeECDataSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeECDataSpec)(nil), (*NnfNodeECDataSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfNodeECDataSpec_To_v1alpha1_NnfNodeECDataSpec(a.(*v1alpha4.NnfNodeECDataSpec), b.(*NnfNodeECDataSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfNodeECDataStatus)(nil), (*v1alpha4.NnfNodeECDataStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeECDataStatus_To_v1alpha4_NnfNodeECDataStatus(a.(*NnfNodeECDataStatus), b.(*v1alpha4.NnfNodeECDataStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeECDataStatus)(nil), (*NnfNodeECDataStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfNodeECDataStatus_To_v1alpha1_NnfNodeECDataStatus(a.(*v1alpha4.NnfNodeECDataStatus), b.(*NnfNodeECDataStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfNodeList)(nil), (*v1alpha4.NnfNodeList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeList_To_v1alpha4_NnfNodeList(a.(*NnfNodeList), b.(*v1alpha4.NnfNodeList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeList)(nil), (*NnfNodeList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfNodeList_To_v1alpha1_NnfNodeList(a.(*v1alpha4.NnfNodeList), b.(*NnfNodeList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfNodeSpec)(nil), (*v1alpha4.NnfNodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha1_NnfNodeSpec_To_v1alpha4_NnfNodeSpec(a.(*NnfNodeSpec), b.(*v1alpha4.NnfNodeSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeSpec)(nil), (*NnfNodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfNodeSpec_To_v1alpha1_NnfNodeSpec(a.(*v1alpha4.NnfNodeSpec), b.(*NnfNodeSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfNodeStatus)(nil), (*v1alpha4.NnfNodeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeStatus_To_v1alpha4_NnfNodeStatus(a.(*NnfNodeStatus), b.(*v1alpha4.NnfNodeStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeStatus)(nil), (*NnfNodeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfNodeStatus_To_v1alpha1_NnfNodeStatus(a.(*v1alpha4.NnfNodeStatus), b.(*NnfNodeStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfNodeStorage)(nil), (*v1alpha4.NnfNodeStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeStorage_To_v1alpha4_NnfNodeStorage(a.(*NnfNodeStorage), b.(*v1alpha4.NnfNodeStorage), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeStorage)(nil), (*NnfNodeStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfNodeStorage_To_v1alpha1_NnfNodeStorage(a.(*v1alpha4.NnfNodeStorage), b.(*NnfNodeStorage), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfNodeStorageAllocationStatus)(nil), (*v1alpha4.NnfNodeStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha1_NnfNodeStorageAllocationStatus_To_v1alpha4_NnfNodeStorageAllocationStatus(a.(*NnfNodeStorageAllocationStatus), b.(*v1alpha4.NnfNodeStorageAllocationStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeStorageAllocationStatus)(nil), (*NnfNodeStorageAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfNodeStorageAllocationStatus_To_v1alpha1_NnfNodeStorageAllocationStatus(a.(*v1alpha4.NnfNodeStorageAllocationStatus), b.(*NnfNodeStorageAllocationStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfNodeStorageList)(nil), (*v1alpha4.NnfNodeStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeStorageList_To_v1alpha4_NnfNodeStorageList(a.(*NnfNodeStorageList), b.(*v1alpha4.NnfNodeStorageList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeStorageList)(nil), (*NnfNodeStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfNodeStorageList_To_v1alpha1_NnfNodeStorageList(a.(*v1alpha4.NnfNodeStorageList), b.(*NnfNodeStorageList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfNodeStorageSpec)(nil), (*v1alpha4.NnfNodeStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeStorageSpec_To_v1alpha4_NnfNodeStorageSpec(a.(*NnfNodeStorageSpec), b.(*v1alpha4.NnfNodeStorageSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeStorageSpec)(nil), (*NnfNodeStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfNodeStorageSpec_To_v1alpha1_NnfNodeStorageSpec(a.(*v1alpha4.NnfNodeStorageSpec), b.(*NnfNodeStorageSpec), scope) - }); err != nil { - return err - } - if err := 
s.AddGeneratedConversionFunc((*NnfNodeStorageStatus)(nil), (*v1alpha4.NnfNodeStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfNodeStorageStatus_To_v1alpha4_NnfNodeStorageStatus(a.(*NnfNodeStorageStatus), b.(*v1alpha4.NnfNodeStorageStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfNodeStorageStatus)(nil), (*NnfNodeStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfNodeStorageStatus_To_v1alpha1_NnfNodeStorageStatus(a.(*v1alpha4.NnfNodeStorageStatus), b.(*NnfNodeStorageStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfPortManager)(nil), (*v1alpha4.NnfPortManager)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfPortManager_To_v1alpha4_NnfPortManager(a.(*NnfPortManager), b.(*v1alpha4.NnfPortManager), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfPortManager)(nil), (*NnfPortManager)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfPortManager_To_v1alpha1_NnfPortManager(a.(*v1alpha4.NnfPortManager), b.(*NnfPortManager), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfPortManagerAllocationSpec)(nil), (*v1alpha4.NnfPortManagerAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfPortManagerAllocationSpec_To_v1alpha4_NnfPortManagerAllocationSpec(a.(*NnfPortManagerAllocationSpec), b.(*v1alpha4.NnfPortManagerAllocationSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfPortManagerAllocationSpec)(nil), (*NnfPortManagerAllocationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha4_NnfPortManagerAllocationSpec_To_v1alpha1_NnfPortManagerAllocationSpec(a.(*v1alpha4.NnfPortManagerAllocationSpec), b.(*NnfPortManagerAllocationSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfPortManagerAllocationStatus)(nil), (*v1alpha4.NnfPortManagerAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfPortManagerAllocationStatus_To_v1alpha4_NnfPortManagerAllocationStatus(a.(*NnfPortManagerAllocationStatus), b.(*v1alpha4.NnfPortManagerAllocationStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfPortManagerAllocationStatus)(nil), (*NnfPortManagerAllocationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfPortManagerAllocationStatus_To_v1alpha1_NnfPortManagerAllocationStatus(a.(*v1alpha4.NnfPortManagerAllocationStatus), b.(*NnfPortManagerAllocationStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfPortManagerList)(nil), (*v1alpha4.NnfPortManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfPortManagerList_To_v1alpha4_NnfPortManagerList(a.(*NnfPortManagerList), b.(*v1alpha4.NnfPortManagerList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfPortManagerList)(nil), (*NnfPortManagerList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfPortManagerList_To_v1alpha1_NnfPortManagerList(a.(*v1alpha4.NnfPortManagerList), b.(*NnfPortManagerList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfPortManagerSpec)(nil), (*v1alpha4.NnfPortManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfPortManagerSpec_To_v1alpha4_NnfPortManagerSpec(a.(*NnfPortManagerSpec), 
b.(*v1alpha4.NnfPortManagerSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfPortManagerSpec)(nil), (*NnfPortManagerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfPortManagerSpec_To_v1alpha1_NnfPortManagerSpec(a.(*v1alpha4.NnfPortManagerSpec), b.(*NnfPortManagerSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfPortManagerStatus)(nil), (*v1alpha4.NnfPortManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfPortManagerStatus_To_v1alpha4_NnfPortManagerStatus(a.(*NnfPortManagerStatus), b.(*v1alpha4.NnfPortManagerStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfPortManagerStatus)(nil), (*NnfPortManagerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfPortManagerStatus_To_v1alpha1_NnfPortManagerStatus(a.(*v1alpha4.NnfPortManagerStatus), b.(*NnfPortManagerStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfResourceStatus)(nil), (*v1alpha4.NnfResourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfResourceStatus_To_v1alpha4_NnfResourceStatus(a.(*NnfResourceStatus), b.(*v1alpha4.NnfResourceStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfResourceStatus)(nil), (*NnfResourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfResourceStatus_To_v1alpha1_NnfResourceStatus(a.(*v1alpha4.NnfResourceStatus), b.(*NnfResourceStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfServerStatus)(nil), (*v1alpha4.NnfServerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha1_NnfServerStatus_To_v1alpha4_NnfServerStatus(a.(*NnfServerStatus), b.(*v1alpha4.NnfServerStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfServerStatus)(nil), (*NnfServerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfServerStatus_To_v1alpha1_NnfServerStatus(a.(*v1alpha4.NnfServerStatus), b.(*NnfServerStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfStorage)(nil), (*v1alpha4.NnfStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorage_To_v1alpha4_NnfStorage(a.(*NnfStorage), b.(*v1alpha4.NnfStorage), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorage)(nil), (*NnfStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfStorage_To_v1alpha1_NnfStorage(a.(*v1alpha4.NnfStorage), b.(*NnfStorage), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfStorageAllocationNodes)(nil), (*v1alpha4.NnfStorageAllocationNodes)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageAllocationNodes_To_v1alpha4_NnfStorageAllocationNodes(a.(*NnfStorageAllocationNodes), b.(*v1alpha4.NnfStorageAllocationNodes), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageAllocationNodes)(nil), (*NnfStorageAllocationNodes)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfStorageAllocationNodes_To_v1alpha1_NnfStorageAllocationNodes(a.(*v1alpha4.NnfStorageAllocationNodes), b.(*NnfStorageAllocationNodes), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfStorageAllocationSetSpec)(nil), (*v1alpha4.NnfStorageAllocationSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error 
{ - return Convert_v1alpha1_NnfStorageAllocationSetSpec_To_v1alpha4_NnfStorageAllocationSetSpec(a.(*NnfStorageAllocationSetSpec), b.(*v1alpha4.NnfStorageAllocationSetSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageAllocationSetSpec)(nil), (*NnfStorageAllocationSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfStorageAllocationSetSpec_To_v1alpha1_NnfStorageAllocationSetSpec(a.(*v1alpha4.NnfStorageAllocationSetSpec), b.(*NnfStorageAllocationSetSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfStorageAllocationSetStatus)(nil), (*v1alpha4.NnfStorageAllocationSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageAllocationSetStatus_To_v1alpha4_NnfStorageAllocationSetStatus(a.(*NnfStorageAllocationSetStatus), b.(*v1alpha4.NnfStorageAllocationSetStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageAllocationSetStatus)(nil), (*NnfStorageAllocationSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfStorageAllocationSetStatus_To_v1alpha1_NnfStorageAllocationSetStatus(a.(*v1alpha4.NnfStorageAllocationSetStatus), b.(*NnfStorageAllocationSetStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfStorageList)(nil), (*v1alpha4.NnfStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageList_To_v1alpha4_NnfStorageList(a.(*NnfStorageList), b.(*v1alpha4.NnfStorageList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageList)(nil), (*NnfStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfStorageList_To_v1alpha1_NnfStorageList(a.(*v1alpha4.NnfStorageList), 
b.(*NnfStorageList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfStorageLustreSpec)(nil), (*v1alpha4.NnfStorageLustreSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageLustreSpec_To_v1alpha4_NnfStorageLustreSpec(a.(*NnfStorageLustreSpec), b.(*v1alpha4.NnfStorageLustreSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageLustreSpec)(nil), (*NnfStorageLustreSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfStorageLustreSpec_To_v1alpha1_NnfStorageLustreSpec(a.(*v1alpha4.NnfStorageLustreSpec), b.(*NnfStorageLustreSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfStorageLustreStatus)(nil), (*v1alpha4.NnfStorageLustreStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageLustreStatus_To_v1alpha4_NnfStorageLustreStatus(a.(*NnfStorageLustreStatus), b.(*v1alpha4.NnfStorageLustreStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageLustreStatus)(nil), (*NnfStorageLustreStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha1_NnfStorageLustreStatus(a.(*v1alpha4.NnfStorageLustreStatus), b.(*NnfStorageLustreStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfile)(nil), (*v1alpha4.NnfStorageProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(a.(*NnfStorageProfile), b.(*v1alpha4.NnfStorageProfile), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfile)(nil), (*NnfStorageProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha4_NnfStorageProfile_To_v1alpha1_NnfStorageProfile(a.(*v1alpha4.NnfStorageProfile), b.(*NnfStorageProfile), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileCmdLines)(nil), (*v1alpha4.NnfStorageProfileCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(a.(*NnfStorageProfileCmdLines), b.(*v1alpha4.NnfStorageProfileCmdLines), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileData)(nil), (*v1alpha4.NnfStorageProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData(a.(*NnfStorageProfileData), b.(*v1alpha4.NnfStorageProfileData), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileData)(nil), (*NnfStorageProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfStorageProfileData_To_v1alpha1_NnfStorageProfileData(a.(*v1alpha4.NnfStorageProfileData), b.(*NnfStorageProfileData), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileGFS2Data)(nil), (*v1alpha4.NnfStorageProfileGFS2Data)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageProfileGFS2Data_To_v1alpha4_NnfStorageProfileGFS2Data(a.(*NnfStorageProfileGFS2Data), b.(*v1alpha4.NnfStorageProfileGFS2Data), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileGFS2Data)(nil), (*NnfStorageProfileGFS2Data)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfStorageProfileGFS2Data_To_v1alpha1_NnfStorageProfileGFS2Data(a.(*v1alpha4.NnfStorageProfileGFS2Data), b.(*NnfStorageProfileGFS2Data), scope) - }); err != 
nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLVMLvChangeCmdLines)(nil), (*v1alpha4.NnfStorageProfileLVMLvChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines(a.(*NnfStorageProfileLVMLvChangeCmdLines), b.(*v1alpha4.NnfStorageProfileLVMLvChangeCmdLines), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileLVMLvChangeCmdLines)(nil), (*NnfStorageProfileLVMLvChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines(a.(*v1alpha4.NnfStorageProfileLVMLvChangeCmdLines), b.(*NnfStorageProfileLVMLvChangeCmdLines), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLVMVgChangeCmdLines)(nil), (*v1alpha4.NnfStorageProfileLVMVgChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines(a.(*NnfStorageProfileLVMVgChangeCmdLines), b.(*v1alpha4.NnfStorageProfileLVMVgChangeCmdLines), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileLVMVgChangeCmdLines)(nil), (*NnfStorageProfileLVMVgChangeCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines(a.(*v1alpha4.NnfStorageProfileLVMVgChangeCmdLines), b.(*NnfStorageProfileLVMVgChangeCmdLines), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileList)(nil), (*v1alpha4.NnfStorageProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha1_NnfStorageProfileList_To_v1alpha4_NnfStorageProfileList(a.(*NnfStorageProfileList), b.(*v1alpha4.NnfStorageProfileList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileList)(nil), (*NnfStorageProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfStorageProfileList_To_v1alpha1_NnfStorageProfileList(a.(*v1alpha4.NnfStorageProfileList), b.(*NnfStorageProfileList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLustreCmdLines)(nil), (*v1alpha4.NnfStorageProfileLustreCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(a.(*NnfStorageProfileLustreCmdLines), b.(*v1alpha4.NnfStorageProfileLustreCmdLines), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLustreData)(nil), (*v1alpha4.NnfStorageProfileLustreData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageProfileLustreData_To_v1alpha4_NnfStorageProfileLustreData(a.(*NnfStorageProfileLustreData), b.(*v1alpha4.NnfStorageProfileLustreData), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileLustreData)(nil), (*NnfStorageProfileLustreData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfStorageProfileLustreData_To_v1alpha1_NnfStorageProfileLustreData(a.(*v1alpha4.NnfStorageProfileLustreData), b.(*NnfStorageProfileLustreData), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileLustreMiscOptions)(nil), (*v1alpha4.NnfStorageProfileLustreMiscOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(a.(*NnfStorageProfileLustreMiscOptions), b.(*v1alpha4.NnfStorageProfileLustreMiscOptions), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileLustreMiscOptions)(nil), (*NnfStorageProfileLustreMiscOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(a.(*v1alpha4.NnfStorageProfileLustreMiscOptions), b.(*NnfStorageProfileLustreMiscOptions), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileRawData)(nil), (*v1alpha4.NnfStorageProfileRawData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageProfileRawData_To_v1alpha4_NnfStorageProfileRawData(a.(*NnfStorageProfileRawData), b.(*v1alpha4.NnfStorageProfileRawData), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileRawData)(nil), (*NnfStorageProfileRawData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfStorageProfileRawData_To_v1alpha1_NnfStorageProfileRawData(a.(*v1alpha4.NnfStorageProfileRawData), b.(*NnfStorageProfileRawData), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfStorageProfileXFSData)(nil), (*v1alpha4.NnfStorageProfileXFSData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageProfileXFSData_To_v1alpha4_NnfStorageProfileXFSData(a.(*NnfStorageProfileXFSData), b.(*v1alpha4.NnfStorageProfileXFSData), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageProfileXFSData)(nil), (*NnfStorageProfileXFSData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha4_NnfStorageProfileXFSData_To_v1alpha1_NnfStorageProfileXFSData(a.(*v1alpha4.NnfStorageProfileXFSData), b.(*NnfStorageProfileXFSData), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfStorageSpec)(nil), (*v1alpha4.NnfStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageSpec_To_v1alpha4_NnfStorageSpec(a.(*NnfStorageSpec), b.(*v1alpha4.NnfStorageSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageSpec)(nil), (*NnfStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfStorageSpec_To_v1alpha1_NnfStorageSpec(a.(*v1alpha4.NnfStorageSpec), b.(*NnfStorageSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfStorageStatus)(nil), (*v1alpha4.NnfStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfStorageStatus_To_v1alpha4_NnfStorageStatus(a.(*NnfStorageStatus), b.(*v1alpha4.NnfStorageStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageStatus)(nil), (*NnfStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfStorageStatus_To_v1alpha1_NnfStorageStatus(a.(*v1alpha4.NnfStorageStatus), b.(*NnfStorageStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfSystemStorage)(nil), (*v1alpha4.NnfSystemStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfSystemStorage_To_v1alpha4_NnfSystemStorage(a.(*NnfSystemStorage), b.(*v1alpha4.NnfSystemStorage), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfSystemStorage)(nil), (*NnfSystemStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha4_NnfSystemStorage_To_v1alpha1_NnfSystemStorage(a.(*v1alpha4.NnfSystemStorage), b.(*NnfSystemStorage), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfSystemStorageList)(nil), (*v1alpha4.NnfSystemStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfSystemStorageList_To_v1alpha4_NnfSystemStorageList(a.(*NnfSystemStorageList), b.(*v1alpha4.NnfSystemStorageList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfSystemStorageList)(nil), (*NnfSystemStorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfSystemStorageList_To_v1alpha1_NnfSystemStorageList(a.(*v1alpha4.NnfSystemStorageList), b.(*NnfSystemStorageList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfSystemStorageSpec)(nil), (*v1alpha4.NnfSystemStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfSystemStorageSpec_To_v1alpha4_NnfSystemStorageSpec(a.(*NnfSystemStorageSpec), b.(*v1alpha4.NnfSystemStorageSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NnfSystemStorageStatus)(nil), (*v1alpha4.NnfSystemStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus(a.(*NnfSystemStorageStatus), b.(*v1alpha4.NnfSystemStorageStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfSystemStorageStatus)(nil), (*NnfSystemStorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfSystemStorageStatus_To_v1alpha1_NnfSystemStorageStatus(a.(*v1alpha4.NnfSystemStorageStatus), b.(*NnfSystemStorageStatus), scope) - }); err != nil { - return err - } - if err := 
s.AddConversionFunc((*v1alpha4.NnfStorageProfileCmdLines)(nil), (*NnfStorageProfileCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(a.(*v1alpha4.NnfStorageProfileCmdLines), b.(*NnfStorageProfileCmdLines), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1alpha4.NnfStorageProfileLustreCmdLines)(nil), (*NnfStorageProfileLustreCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(a.(*v1alpha4.NnfStorageProfileLustreCmdLines), b.(*NnfStorageProfileLustreCmdLines), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1alpha4.NnfSystemStorageSpec)(nil), (*NnfSystemStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha1_NnfSystemStorageSpec(a.(*v1alpha4.NnfSystemStorageSpec), b.(*NnfSystemStorageSpec), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1alpha1_LustreStorageSpec_To_v1alpha4_LustreStorageSpec(in *LustreStorageSpec, out *v1alpha4.LustreStorageSpec, s conversion.Scope) error { - out.FileSystemName = in.FileSystemName - out.TargetType = in.TargetType - out.StartIndex = in.StartIndex - out.MgsAddress = in.MgsAddress - out.BackFs = in.BackFs - return nil -} - -// Convert_v1alpha1_LustreStorageSpec_To_v1alpha4_LustreStorageSpec is an autogenerated conversion function. 
-func Convert_v1alpha1_LustreStorageSpec_To_v1alpha4_LustreStorageSpec(in *LustreStorageSpec, out *v1alpha4.LustreStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_LustreStorageSpec_To_v1alpha4_LustreStorageSpec(in, out, s) -} - -func autoConvert_v1alpha4_LustreStorageSpec_To_v1alpha1_LustreStorageSpec(in *v1alpha4.LustreStorageSpec, out *LustreStorageSpec, s conversion.Scope) error { - out.FileSystemName = in.FileSystemName - out.TargetType = in.TargetType - out.StartIndex = in.StartIndex - out.MgsAddress = in.MgsAddress - out.BackFs = in.BackFs - return nil -} - -// Convert_v1alpha4_LustreStorageSpec_To_v1alpha1_LustreStorageSpec is an autogenerated conversion function. -func Convert_v1alpha4_LustreStorageSpec_To_v1alpha1_LustreStorageSpec(in *v1alpha4.LustreStorageSpec, out *LustreStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha4_LustreStorageSpec_To_v1alpha1_LustreStorageSpec(in, out, s) -} - -func autoConvert_v1alpha1_NnfAccess_To_v1alpha4_NnfAccess(in *NnfAccess, out *v1alpha4.NnfAccess, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfAccessSpec_To_v1alpha4_NnfAccessSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha1_NnfAccessStatus_To_v1alpha4_NnfAccessStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_NnfAccess_To_v1alpha4_NnfAccess is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfAccess_To_v1alpha4_NnfAccess(in *NnfAccess, out *v1alpha4.NnfAccess, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfAccess_To_v1alpha4_NnfAccess(in, out, s) -} - -func autoConvert_v1alpha4_NnfAccess_To_v1alpha1_NnfAccess(in *v1alpha4.NnfAccess, out *NnfAccess, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha4_NnfAccessSpec_To_v1alpha1_NnfAccessSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha4_NnfAccessStatus_To_v1alpha1_NnfAccessStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha4_NnfAccess_To_v1alpha1_NnfAccess is an autogenerated conversion function. -func Convert_v1alpha4_NnfAccess_To_v1alpha1_NnfAccess(in *v1alpha4.NnfAccess, out *NnfAccess, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfAccess_To_v1alpha1_NnfAccess(in, out, s) -} - -func autoConvert_v1alpha1_NnfAccessList_To_v1alpha4_NnfAccessList(in *NnfAccessList, out *v1alpha4.NnfAccessList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha4.NnfAccess)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_NnfAccessList_To_v1alpha4_NnfAccessList is an autogenerated conversion function. -func Convert_v1alpha1_NnfAccessList_To_v1alpha4_NnfAccessList(in *NnfAccessList, out *v1alpha4.NnfAccessList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfAccessList_To_v1alpha4_NnfAccessList(in, out, s) -} - -func autoConvert_v1alpha4_NnfAccessList_To_v1alpha1_NnfAccessList(in *v1alpha4.NnfAccessList, out *NnfAccessList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]NnfAccess)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha4_NnfAccessList_To_v1alpha1_NnfAccessList is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfAccessList_To_v1alpha1_NnfAccessList(in *v1alpha4.NnfAccessList, out *NnfAccessList, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfAccessList_To_v1alpha1_NnfAccessList(in, out, s) -} - -func autoConvert_v1alpha1_NnfAccessSpec_To_v1alpha4_NnfAccessSpec(in *NnfAccessSpec, out *v1alpha4.NnfAccessSpec, s conversion.Scope) error { - out.DesiredState = in.DesiredState - out.TeardownState = v1alpha2.WorkflowState(in.TeardownState) - out.Target = in.Target - out.UserID = in.UserID - out.GroupID = in.GroupID - out.ClientReference = in.ClientReference - out.MountPath = in.MountPath - out.MakeClientMounts = in.MakeClientMounts - out.MountPathPrefix = in.MountPathPrefix - out.StorageReference = in.StorageReference - return nil -} - -// Convert_v1alpha1_NnfAccessSpec_To_v1alpha4_NnfAccessSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfAccessSpec_To_v1alpha4_NnfAccessSpec(in *NnfAccessSpec, out *v1alpha4.NnfAccessSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfAccessSpec_To_v1alpha4_NnfAccessSpec(in, out, s) -} - -func autoConvert_v1alpha4_NnfAccessSpec_To_v1alpha1_NnfAccessSpec(in *v1alpha4.NnfAccessSpec, out *NnfAccessSpec, s conversion.Scope) error { - out.DesiredState = in.DesiredState - out.TeardownState = v1alpha2.WorkflowState(in.TeardownState) - out.Target = in.Target - out.UserID = in.UserID - out.GroupID = in.GroupID - out.ClientReference = in.ClientReference - out.MountPath = in.MountPath - out.MakeClientMounts = in.MakeClientMounts - out.MountPathPrefix = in.MountPathPrefix - out.StorageReference = in.StorageReference - return nil -} - -// Convert_v1alpha4_NnfAccessSpec_To_v1alpha1_NnfAccessSpec is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfAccessSpec_To_v1alpha1_NnfAccessSpec(in *v1alpha4.NnfAccessSpec, out *NnfAccessSpec, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfAccessSpec_To_v1alpha1_NnfAccessSpec(in, out, s) -} - -func autoConvert_v1alpha1_NnfAccessStatus_To_v1alpha4_NnfAccessStatus(in *NnfAccessStatus, out *v1alpha4.NnfAccessStatus, s conversion.Scope) error { - out.State = in.State - out.Ready = in.Ready - out.ResourceError = in.ResourceError - return nil -} - -// Convert_v1alpha1_NnfAccessStatus_To_v1alpha4_NnfAccessStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfAccessStatus_To_v1alpha4_NnfAccessStatus(in *NnfAccessStatus, out *v1alpha4.NnfAccessStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfAccessStatus_To_v1alpha4_NnfAccessStatus(in, out, s) -} - -func autoConvert_v1alpha4_NnfAccessStatus_To_v1alpha1_NnfAccessStatus(in *v1alpha4.NnfAccessStatus, out *NnfAccessStatus, s conversion.Scope) error { - out.State = in.State - out.Ready = in.Ready - out.ResourceError = in.ResourceError - return nil -} - -// Convert_v1alpha4_NnfAccessStatus_To_v1alpha1_NnfAccessStatus is an autogenerated conversion function. -func Convert_v1alpha4_NnfAccessStatus_To_v1alpha1_NnfAccessStatus(in *v1alpha4.NnfAccessStatus, out *NnfAccessStatus, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfAccessStatus_To_v1alpha1_NnfAccessStatus(in, out, s) -} - -func autoConvert_v1alpha1_NnfContainerProfile_To_v1alpha4_NnfContainerProfile(in *NnfContainerProfile, out *v1alpha4.NnfContainerProfile, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfContainerProfileData_To_v1alpha4_NnfContainerProfileData(&in.Data, &out.Data, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_NnfContainerProfile_To_v1alpha4_NnfContainerProfile is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfContainerProfile_To_v1alpha4_NnfContainerProfile(in *NnfContainerProfile, out *v1alpha4.NnfContainerProfile, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfContainerProfile_To_v1alpha4_NnfContainerProfile(in, out, s) -} - -func autoConvert_v1alpha4_NnfContainerProfile_To_v1alpha1_NnfContainerProfile(in *v1alpha4.NnfContainerProfile, out *NnfContainerProfile, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha4_NnfContainerProfileData_To_v1alpha1_NnfContainerProfileData(&in.Data, &out.Data, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha4_NnfContainerProfile_To_v1alpha1_NnfContainerProfile is an autogenerated conversion function. -func Convert_v1alpha4_NnfContainerProfile_To_v1alpha1_NnfContainerProfile(in *v1alpha4.NnfContainerProfile, out *NnfContainerProfile, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfContainerProfile_To_v1alpha1_NnfContainerProfile(in, out, s) -} - -func autoConvert_v1alpha1_NnfContainerProfileData_To_v1alpha4_NnfContainerProfileData(in *NnfContainerProfileData, out *v1alpha4.NnfContainerProfileData, s conversion.Scope) error { - out.Pinned = in.Pinned - out.Storages = *(*[]v1alpha4.NnfContainerProfileStorage)(unsafe.Pointer(&in.Storages)) - out.PreRunTimeoutSeconds = (*int64)(unsafe.Pointer(in.PreRunTimeoutSeconds)) - out.PostRunTimeoutSeconds = (*int64)(unsafe.Pointer(in.PostRunTimeoutSeconds)) - out.RetryLimit = in.RetryLimit - out.UserID = (*uint32)(unsafe.Pointer(in.UserID)) - out.GroupID = (*uint32)(unsafe.Pointer(in.GroupID)) - out.NumPorts = in.NumPorts - out.Spec = (*v1.PodSpec)(unsafe.Pointer(in.Spec)) - out.MPISpec = (*v2beta1.MPIJobSpec)(unsafe.Pointer(in.MPISpec)) - return nil -} - -// Convert_v1alpha1_NnfContainerProfileData_To_v1alpha4_NnfContainerProfileData is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfContainerProfileData_To_v1alpha4_NnfContainerProfileData(in *NnfContainerProfileData, out *v1alpha4.NnfContainerProfileData, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfContainerProfileData_To_v1alpha4_NnfContainerProfileData(in, out, s) -} - -func autoConvert_v1alpha4_NnfContainerProfileData_To_v1alpha1_NnfContainerProfileData(in *v1alpha4.NnfContainerProfileData, out *NnfContainerProfileData, s conversion.Scope) error { - out.Pinned = in.Pinned - out.Storages = *(*[]NnfContainerProfileStorage)(unsafe.Pointer(&in.Storages)) - out.PreRunTimeoutSeconds = (*int64)(unsafe.Pointer(in.PreRunTimeoutSeconds)) - out.PostRunTimeoutSeconds = (*int64)(unsafe.Pointer(in.PostRunTimeoutSeconds)) - out.RetryLimit = in.RetryLimit - out.UserID = (*uint32)(unsafe.Pointer(in.UserID)) - out.GroupID = (*uint32)(unsafe.Pointer(in.GroupID)) - out.NumPorts = in.NumPorts - out.Spec = (*v1.PodSpec)(unsafe.Pointer(in.Spec)) - out.MPISpec = (*v2beta1.MPIJobSpec)(unsafe.Pointer(in.MPISpec)) - return nil -} - -// Convert_v1alpha4_NnfContainerProfileData_To_v1alpha1_NnfContainerProfileData is an autogenerated conversion function. -func Convert_v1alpha4_NnfContainerProfileData_To_v1alpha1_NnfContainerProfileData(in *v1alpha4.NnfContainerProfileData, out *NnfContainerProfileData, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfContainerProfileData_To_v1alpha1_NnfContainerProfileData(in, out, s) -} - -func autoConvert_v1alpha1_NnfContainerProfileList_To_v1alpha4_NnfContainerProfileList(in *NnfContainerProfileList, out *v1alpha4.NnfContainerProfileList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha4.NnfContainerProfile)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_NnfContainerProfileList_To_v1alpha4_NnfContainerProfileList is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfContainerProfileList_To_v1alpha4_NnfContainerProfileList(in *NnfContainerProfileList, out *v1alpha4.NnfContainerProfileList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfContainerProfileList_To_v1alpha4_NnfContainerProfileList(in, out, s) -} - -func autoConvert_v1alpha4_NnfContainerProfileList_To_v1alpha1_NnfContainerProfileList(in *v1alpha4.NnfContainerProfileList, out *NnfContainerProfileList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]NnfContainerProfile)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha4_NnfContainerProfileList_To_v1alpha1_NnfContainerProfileList is an autogenerated conversion function. -func Convert_v1alpha4_NnfContainerProfileList_To_v1alpha1_NnfContainerProfileList(in *v1alpha4.NnfContainerProfileList, out *NnfContainerProfileList, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfContainerProfileList_To_v1alpha1_NnfContainerProfileList(in, out, s) -} - -func autoConvert_v1alpha1_NnfContainerProfileStorage_To_v1alpha4_NnfContainerProfileStorage(in *NnfContainerProfileStorage, out *v1alpha4.NnfContainerProfileStorage, s conversion.Scope) error { - out.Name = in.Name - out.Optional = in.Optional - out.PVCMode = v1.PersistentVolumeAccessMode(in.PVCMode) - return nil -} - -// Convert_v1alpha1_NnfContainerProfileStorage_To_v1alpha4_NnfContainerProfileStorage is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfContainerProfileStorage_To_v1alpha4_NnfContainerProfileStorage(in *NnfContainerProfileStorage, out *v1alpha4.NnfContainerProfileStorage, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfContainerProfileStorage_To_v1alpha4_NnfContainerProfileStorage(in, out, s) -} - -func autoConvert_v1alpha4_NnfContainerProfileStorage_To_v1alpha1_NnfContainerProfileStorage(in *v1alpha4.NnfContainerProfileStorage, out *NnfContainerProfileStorage, s conversion.Scope) error { - out.Name = in.Name - out.Optional = in.Optional - out.PVCMode = v1.PersistentVolumeAccessMode(in.PVCMode) - return nil -} - -// Convert_v1alpha4_NnfContainerProfileStorage_To_v1alpha1_NnfContainerProfileStorage is an autogenerated conversion function. -func Convert_v1alpha4_NnfContainerProfileStorage_To_v1alpha1_NnfContainerProfileStorage(in *v1alpha4.NnfContainerProfileStorage, out *NnfContainerProfileStorage, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfContainerProfileStorage_To_v1alpha1_NnfContainerProfileStorage(in, out, s) -} - -func autoConvert_v1alpha1_NnfDataMovement_To_v1alpha4_NnfDataMovement(in *NnfDataMovement, out *v1alpha4.NnfDataMovement, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfDataMovementSpec_To_v1alpha4_NnfDataMovementSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha1_NnfDataMovementStatus_To_v1alpha4_NnfDataMovementStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_NnfDataMovement_To_v1alpha4_NnfDataMovement is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfDataMovement_To_v1alpha4_NnfDataMovement(in *NnfDataMovement, out *v1alpha4.NnfDataMovement, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovement_To_v1alpha4_NnfDataMovement(in, out, s) -} - -func autoConvert_v1alpha4_NnfDataMovement_To_v1alpha1_NnfDataMovement(in *v1alpha4.NnfDataMovement, out *NnfDataMovement, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha4_NnfDataMovementSpec_To_v1alpha1_NnfDataMovementSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha4_NnfDataMovementStatus_To_v1alpha1_NnfDataMovementStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha4_NnfDataMovement_To_v1alpha1_NnfDataMovement is an autogenerated conversion function. -func Convert_v1alpha4_NnfDataMovement_To_v1alpha1_NnfDataMovement(in *v1alpha4.NnfDataMovement, out *NnfDataMovement, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfDataMovement_To_v1alpha1_NnfDataMovement(in, out, s) -} - -func autoConvert_v1alpha1_NnfDataMovementCommandStatus_To_v1alpha4_NnfDataMovementCommandStatus(in *NnfDataMovementCommandStatus, out *v1alpha4.NnfDataMovementCommandStatus, s conversion.Scope) error { - out.Command = in.Command - out.ElapsedTime = in.ElapsedTime - out.ProgressPercentage = (*int32)(unsafe.Pointer(in.ProgressPercentage)) - out.LastMessage = in.LastMessage - out.LastMessageTime = in.LastMessageTime - out.Seconds = in.Seconds - out.Items = (*int32)(unsafe.Pointer(in.Items)) - out.Directories = (*int32)(unsafe.Pointer(in.Directories)) - out.Files = (*int32)(unsafe.Pointer(in.Files)) - out.Links = (*int32)(unsafe.Pointer(in.Links)) - out.Data = in.Data - out.Rate = in.Rate - return nil -} - -// Convert_v1alpha1_NnfDataMovementCommandStatus_To_v1alpha4_NnfDataMovementCommandStatus is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfDataMovementCommandStatus_To_v1alpha4_NnfDataMovementCommandStatus(in *NnfDataMovementCommandStatus, out *v1alpha4.NnfDataMovementCommandStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovementCommandStatus_To_v1alpha4_NnfDataMovementCommandStatus(in, out, s) -} - -func autoConvert_v1alpha4_NnfDataMovementCommandStatus_To_v1alpha1_NnfDataMovementCommandStatus(in *v1alpha4.NnfDataMovementCommandStatus, out *NnfDataMovementCommandStatus, s conversion.Scope) error { - out.Command = in.Command - out.ElapsedTime = in.ElapsedTime - out.ProgressPercentage = (*int32)(unsafe.Pointer(in.ProgressPercentage)) - out.LastMessage = in.LastMessage - out.LastMessageTime = in.LastMessageTime - out.Seconds = in.Seconds - out.Items = (*int32)(unsafe.Pointer(in.Items)) - out.Directories = (*int32)(unsafe.Pointer(in.Directories)) - out.Files = (*int32)(unsafe.Pointer(in.Files)) - out.Links = (*int32)(unsafe.Pointer(in.Links)) - out.Data = in.Data - out.Rate = in.Rate - return nil -} - -// Convert_v1alpha4_NnfDataMovementCommandStatus_To_v1alpha1_NnfDataMovementCommandStatus is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfDataMovementCommandStatus_To_v1alpha1_NnfDataMovementCommandStatus(in *v1alpha4.NnfDataMovementCommandStatus, out *NnfDataMovementCommandStatus, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfDataMovementCommandStatus_To_v1alpha1_NnfDataMovementCommandStatus(in, out, s) -} - -func autoConvert_v1alpha1_NnfDataMovementConfig_To_v1alpha4_NnfDataMovementConfig(in *NnfDataMovementConfig, out *v1alpha4.NnfDataMovementConfig, s conversion.Scope) error { - out.Dryrun = in.Dryrun - out.MpirunOptions = in.MpirunOptions - out.DcpOptions = in.DcpOptions - out.LogStdout = in.LogStdout - out.StoreStdout = in.StoreStdout - out.Slots = (*int)(unsafe.Pointer(in.Slots)) - out.MaxSlots = (*int)(unsafe.Pointer(in.MaxSlots)) - return nil -} - -// Convert_v1alpha1_NnfDataMovementConfig_To_v1alpha4_NnfDataMovementConfig is an autogenerated conversion function. -func Convert_v1alpha1_NnfDataMovementConfig_To_v1alpha4_NnfDataMovementConfig(in *NnfDataMovementConfig, out *v1alpha4.NnfDataMovementConfig, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovementConfig_To_v1alpha4_NnfDataMovementConfig(in, out, s) -} - -func autoConvert_v1alpha4_NnfDataMovementConfig_To_v1alpha1_NnfDataMovementConfig(in *v1alpha4.NnfDataMovementConfig, out *NnfDataMovementConfig, s conversion.Scope) error { - out.Dryrun = in.Dryrun - out.MpirunOptions = in.MpirunOptions - out.DcpOptions = in.DcpOptions - out.LogStdout = in.LogStdout - out.StoreStdout = in.StoreStdout - out.Slots = (*int)(unsafe.Pointer(in.Slots)) - out.MaxSlots = (*int)(unsafe.Pointer(in.MaxSlots)) - return nil -} - -// Convert_v1alpha4_NnfDataMovementConfig_To_v1alpha1_NnfDataMovementConfig is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfDataMovementConfig_To_v1alpha1_NnfDataMovementConfig(in *v1alpha4.NnfDataMovementConfig, out *NnfDataMovementConfig, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfDataMovementConfig_To_v1alpha1_NnfDataMovementConfig(in, out, s) -} - -func autoConvert_v1alpha1_NnfDataMovementList_To_v1alpha4_NnfDataMovementList(in *NnfDataMovementList, out *v1alpha4.NnfDataMovementList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha4.NnfDataMovement)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_NnfDataMovementList_To_v1alpha4_NnfDataMovementList is an autogenerated conversion function. -func Convert_v1alpha1_NnfDataMovementList_To_v1alpha4_NnfDataMovementList(in *NnfDataMovementList, out *v1alpha4.NnfDataMovementList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovementList_To_v1alpha4_NnfDataMovementList(in, out, s) -} - -func autoConvert_v1alpha4_NnfDataMovementList_To_v1alpha1_NnfDataMovementList(in *v1alpha4.NnfDataMovementList, out *NnfDataMovementList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]NnfDataMovement)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha4_NnfDataMovementList_To_v1alpha1_NnfDataMovementList is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfDataMovementList_To_v1alpha1_NnfDataMovementList(in *v1alpha4.NnfDataMovementList, out *NnfDataMovementList, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfDataMovementList_To_v1alpha1_NnfDataMovementList(in, out, s) -} - -func autoConvert_v1alpha1_NnfDataMovementManager_To_v1alpha4_NnfDataMovementManager(in *NnfDataMovementManager, out *v1alpha4.NnfDataMovementManager, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfDataMovementManagerSpec_To_v1alpha4_NnfDataMovementManagerSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha1_NnfDataMovementManagerStatus_To_v1alpha4_NnfDataMovementManagerStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_NnfDataMovementManager_To_v1alpha4_NnfDataMovementManager is an autogenerated conversion function. -func Convert_v1alpha1_NnfDataMovementManager_To_v1alpha4_NnfDataMovementManager(in *NnfDataMovementManager, out *v1alpha4.NnfDataMovementManager, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovementManager_To_v1alpha4_NnfDataMovementManager(in, out, s) -} - -func autoConvert_v1alpha4_NnfDataMovementManager_To_v1alpha1_NnfDataMovementManager(in *v1alpha4.NnfDataMovementManager, out *NnfDataMovementManager, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha4_NnfDataMovementManagerSpec_To_v1alpha1_NnfDataMovementManagerSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha4_NnfDataMovementManagerStatus_To_v1alpha1_NnfDataMovementManagerStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha4_NnfDataMovementManager_To_v1alpha1_NnfDataMovementManager is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfDataMovementManager_To_v1alpha1_NnfDataMovementManager(in *v1alpha4.NnfDataMovementManager, out *NnfDataMovementManager, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfDataMovementManager_To_v1alpha1_NnfDataMovementManager(in, out, s) -} - -func autoConvert_v1alpha1_NnfDataMovementManagerList_To_v1alpha4_NnfDataMovementManagerList(in *NnfDataMovementManagerList, out *v1alpha4.NnfDataMovementManagerList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha4.NnfDataMovementManager)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_NnfDataMovementManagerList_To_v1alpha4_NnfDataMovementManagerList is an autogenerated conversion function. -func Convert_v1alpha1_NnfDataMovementManagerList_To_v1alpha4_NnfDataMovementManagerList(in *NnfDataMovementManagerList, out *v1alpha4.NnfDataMovementManagerList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovementManagerList_To_v1alpha4_NnfDataMovementManagerList(in, out, s) -} - -func autoConvert_v1alpha4_NnfDataMovementManagerList_To_v1alpha1_NnfDataMovementManagerList(in *v1alpha4.NnfDataMovementManagerList, out *NnfDataMovementManagerList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]NnfDataMovementManager)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha4_NnfDataMovementManagerList_To_v1alpha1_NnfDataMovementManagerList is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfDataMovementManagerList_To_v1alpha1_NnfDataMovementManagerList(in *v1alpha4.NnfDataMovementManagerList, out *NnfDataMovementManagerList, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfDataMovementManagerList_To_v1alpha1_NnfDataMovementManagerList(in, out, s) -} - -func autoConvert_v1alpha1_NnfDataMovementManagerSpec_To_v1alpha4_NnfDataMovementManagerSpec(in *NnfDataMovementManagerSpec, out *v1alpha4.NnfDataMovementManagerSpec, s conversion.Scope) error { - out.Selector = in.Selector - out.Template = in.Template - out.UpdateStrategy = in.UpdateStrategy - out.HostPath = in.HostPath - out.MountPath = in.MountPath - return nil -} - -// Convert_v1alpha1_NnfDataMovementManagerSpec_To_v1alpha4_NnfDataMovementManagerSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfDataMovementManagerSpec_To_v1alpha4_NnfDataMovementManagerSpec(in *NnfDataMovementManagerSpec, out *v1alpha4.NnfDataMovementManagerSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovementManagerSpec_To_v1alpha4_NnfDataMovementManagerSpec(in, out, s) -} - -func autoConvert_v1alpha4_NnfDataMovementManagerSpec_To_v1alpha1_NnfDataMovementManagerSpec(in *v1alpha4.NnfDataMovementManagerSpec, out *NnfDataMovementManagerSpec, s conversion.Scope) error { - out.Selector = in.Selector - out.Template = in.Template - out.UpdateStrategy = in.UpdateStrategy - out.HostPath = in.HostPath - out.MountPath = in.MountPath - return nil -} - -// Convert_v1alpha4_NnfDataMovementManagerSpec_To_v1alpha1_NnfDataMovementManagerSpec is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfDataMovementManagerSpec_To_v1alpha1_NnfDataMovementManagerSpec(in *v1alpha4.NnfDataMovementManagerSpec, out *NnfDataMovementManagerSpec, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfDataMovementManagerSpec_To_v1alpha1_NnfDataMovementManagerSpec(in, out, s) -} - -func autoConvert_v1alpha1_NnfDataMovementManagerStatus_To_v1alpha4_NnfDataMovementManagerStatus(in *NnfDataMovementManagerStatus, out *v1alpha4.NnfDataMovementManagerStatus, s conversion.Scope) error { - out.Ready = in.Ready - return nil -} - -// Convert_v1alpha1_NnfDataMovementManagerStatus_To_v1alpha4_NnfDataMovementManagerStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfDataMovementManagerStatus_To_v1alpha4_NnfDataMovementManagerStatus(in *NnfDataMovementManagerStatus, out *v1alpha4.NnfDataMovementManagerStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovementManagerStatus_To_v1alpha4_NnfDataMovementManagerStatus(in, out, s) -} - -func autoConvert_v1alpha4_NnfDataMovementManagerStatus_To_v1alpha1_NnfDataMovementManagerStatus(in *v1alpha4.NnfDataMovementManagerStatus, out *NnfDataMovementManagerStatus, s conversion.Scope) error { - out.Ready = in.Ready - return nil -} - -// Convert_v1alpha4_NnfDataMovementManagerStatus_To_v1alpha1_NnfDataMovementManagerStatus is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfDataMovementManagerStatus_To_v1alpha1_NnfDataMovementManagerStatus(in *v1alpha4.NnfDataMovementManagerStatus, out *NnfDataMovementManagerStatus, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfDataMovementManagerStatus_To_v1alpha1_NnfDataMovementManagerStatus(in, out, s) -} - -func autoConvert_v1alpha1_NnfDataMovementProfile_To_v1alpha4_NnfDataMovementProfile(in *NnfDataMovementProfile, out *v1alpha4.NnfDataMovementProfile, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfDataMovementProfileData_To_v1alpha4_NnfDataMovementProfileData(&in.Data, &out.Data, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_NnfDataMovementProfile_To_v1alpha4_NnfDataMovementProfile is an autogenerated conversion function. -func Convert_v1alpha1_NnfDataMovementProfile_To_v1alpha4_NnfDataMovementProfile(in *NnfDataMovementProfile, out *v1alpha4.NnfDataMovementProfile, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovementProfile_To_v1alpha4_NnfDataMovementProfile(in, out, s) -} - -func autoConvert_v1alpha4_NnfDataMovementProfile_To_v1alpha1_NnfDataMovementProfile(in *v1alpha4.NnfDataMovementProfile, out *NnfDataMovementProfile, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha1_NnfDataMovementProfileData(&in.Data, &out.Data, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha4_NnfDataMovementProfile_To_v1alpha1_NnfDataMovementProfile is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfDataMovementProfile_To_v1alpha1_NnfDataMovementProfile(in *v1alpha4.NnfDataMovementProfile, out *NnfDataMovementProfile, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfDataMovementProfile_To_v1alpha1_NnfDataMovementProfile(in, out, s) -} - -func autoConvert_v1alpha1_NnfDataMovementProfileData_To_v1alpha4_NnfDataMovementProfileData(in *NnfDataMovementProfileData, out *v1alpha4.NnfDataMovementProfileData, s conversion.Scope) error { - out.Default = in.Default - out.Pinned = in.Pinned - out.Slots = in.Slots - out.MaxSlots = in.MaxSlots - out.Command = in.Command - out.LogStdout = in.LogStdout - out.StoreStdout = in.StoreStdout - out.ProgressIntervalSeconds = in.ProgressIntervalSeconds - out.CreateDestDir = in.CreateDestDir - out.StatCommand = in.StatCommand - return nil -} - -// Convert_v1alpha1_NnfDataMovementProfileData_To_v1alpha4_NnfDataMovementProfileData is an autogenerated conversion function. -func Convert_v1alpha1_NnfDataMovementProfileData_To_v1alpha4_NnfDataMovementProfileData(in *NnfDataMovementProfileData, out *v1alpha4.NnfDataMovementProfileData, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovementProfileData_To_v1alpha4_NnfDataMovementProfileData(in, out, s) -} - -func autoConvert_v1alpha4_NnfDataMovementProfileData_To_v1alpha1_NnfDataMovementProfileData(in *v1alpha4.NnfDataMovementProfileData, out *NnfDataMovementProfileData, s conversion.Scope) error { - out.Default = in.Default - out.Pinned = in.Pinned - out.Slots = in.Slots - out.MaxSlots = in.MaxSlots - out.Command = in.Command - out.LogStdout = in.LogStdout - out.StoreStdout = in.StoreStdout - out.ProgressIntervalSeconds = in.ProgressIntervalSeconds - out.CreateDestDir = in.CreateDestDir - out.StatCommand = in.StatCommand - return nil -} - -// Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha1_NnfDataMovementProfileData is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha1_NnfDataMovementProfileData(in *v1alpha4.NnfDataMovementProfileData, out *NnfDataMovementProfileData, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfDataMovementProfileData_To_v1alpha1_NnfDataMovementProfileData(in, out, s) -} - -func autoConvert_v1alpha1_NnfDataMovementProfileList_To_v1alpha4_NnfDataMovementProfileList(in *NnfDataMovementProfileList, out *v1alpha4.NnfDataMovementProfileList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha4.NnfDataMovementProfile)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_NnfDataMovementProfileList_To_v1alpha4_NnfDataMovementProfileList is an autogenerated conversion function. -func Convert_v1alpha1_NnfDataMovementProfileList_To_v1alpha4_NnfDataMovementProfileList(in *NnfDataMovementProfileList, out *v1alpha4.NnfDataMovementProfileList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovementProfileList_To_v1alpha4_NnfDataMovementProfileList(in, out, s) -} - -func autoConvert_v1alpha4_NnfDataMovementProfileList_To_v1alpha1_NnfDataMovementProfileList(in *v1alpha4.NnfDataMovementProfileList, out *NnfDataMovementProfileList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]NnfDataMovementProfile)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha4_NnfDataMovementProfileList_To_v1alpha1_NnfDataMovementProfileList is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfDataMovementProfileList_To_v1alpha1_NnfDataMovementProfileList(in *v1alpha4.NnfDataMovementProfileList, out *NnfDataMovementProfileList, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfDataMovementProfileList_To_v1alpha1_NnfDataMovementProfileList(in, out, s) -} - -func autoConvert_v1alpha1_NnfDataMovementSpec_To_v1alpha4_NnfDataMovementSpec(in *NnfDataMovementSpec, out *v1alpha4.NnfDataMovementSpec, s conversion.Scope) error { - out.Source = (*v1alpha4.NnfDataMovementSpecSourceDestination)(unsafe.Pointer(in.Source)) - out.Destination = (*v1alpha4.NnfDataMovementSpecSourceDestination)(unsafe.Pointer(in.Destination)) - out.UserId = in.UserId - out.GroupId = in.GroupId - out.Cancel = in.Cancel - out.ProfileReference = in.ProfileReference - out.UserConfig = (*v1alpha4.NnfDataMovementConfig)(unsafe.Pointer(in.UserConfig)) - return nil -} - -// Convert_v1alpha1_NnfDataMovementSpec_To_v1alpha4_NnfDataMovementSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfDataMovementSpec_To_v1alpha4_NnfDataMovementSpec(in *NnfDataMovementSpec, out *v1alpha4.NnfDataMovementSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovementSpec_To_v1alpha4_NnfDataMovementSpec(in, out, s) -} - -func autoConvert_v1alpha4_NnfDataMovementSpec_To_v1alpha1_NnfDataMovementSpec(in *v1alpha4.NnfDataMovementSpec, out *NnfDataMovementSpec, s conversion.Scope) error { - out.Source = (*NnfDataMovementSpecSourceDestination)(unsafe.Pointer(in.Source)) - out.Destination = (*NnfDataMovementSpecSourceDestination)(unsafe.Pointer(in.Destination)) - out.UserId = in.UserId - out.GroupId = in.GroupId - out.Cancel = in.Cancel - out.ProfileReference = in.ProfileReference - out.UserConfig = (*NnfDataMovementConfig)(unsafe.Pointer(in.UserConfig)) - return nil -} - -// Convert_v1alpha4_NnfDataMovementSpec_To_v1alpha1_NnfDataMovementSpec is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfDataMovementSpec_To_v1alpha1_NnfDataMovementSpec(in *v1alpha4.NnfDataMovementSpec, out *NnfDataMovementSpec, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfDataMovementSpec_To_v1alpha1_NnfDataMovementSpec(in, out, s) -} - -func autoConvert_v1alpha1_NnfDataMovementSpecSourceDestination_To_v1alpha4_NnfDataMovementSpecSourceDestination(in *NnfDataMovementSpecSourceDestination, out *v1alpha4.NnfDataMovementSpecSourceDestination, s conversion.Scope) error { - out.Path = in.Path - out.StorageReference = in.StorageReference - return nil -} - -// Convert_v1alpha1_NnfDataMovementSpecSourceDestination_To_v1alpha4_NnfDataMovementSpecSourceDestination is an autogenerated conversion function. -func Convert_v1alpha1_NnfDataMovementSpecSourceDestination_To_v1alpha4_NnfDataMovementSpecSourceDestination(in *NnfDataMovementSpecSourceDestination, out *v1alpha4.NnfDataMovementSpecSourceDestination, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovementSpecSourceDestination_To_v1alpha4_NnfDataMovementSpecSourceDestination(in, out, s) -} - -func autoConvert_v1alpha4_NnfDataMovementSpecSourceDestination_To_v1alpha1_NnfDataMovementSpecSourceDestination(in *v1alpha4.NnfDataMovementSpecSourceDestination, out *NnfDataMovementSpecSourceDestination, s conversion.Scope) error { - out.Path = in.Path - out.StorageReference = in.StorageReference - return nil -} - -// Convert_v1alpha4_NnfDataMovementSpecSourceDestination_To_v1alpha1_NnfDataMovementSpecSourceDestination is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfDataMovementSpecSourceDestination_To_v1alpha1_NnfDataMovementSpecSourceDestination(in *v1alpha4.NnfDataMovementSpecSourceDestination, out *NnfDataMovementSpecSourceDestination, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfDataMovementSpecSourceDestination_To_v1alpha1_NnfDataMovementSpecSourceDestination(in, out, s) -} - -func autoConvert_v1alpha1_NnfDataMovementStatus_To_v1alpha4_NnfDataMovementStatus(in *NnfDataMovementStatus, out *v1alpha4.NnfDataMovementStatus, s conversion.Scope) error { - out.State = in.State - out.Status = in.Status - out.Message = in.Message - out.StartTime = (*metav1.MicroTime)(unsafe.Pointer(in.StartTime)) - out.EndTime = (*metav1.MicroTime)(unsafe.Pointer(in.EndTime)) - out.Restarts = in.Restarts - out.CommandStatus = (*v1alpha4.NnfDataMovementCommandStatus)(unsafe.Pointer(in.CommandStatus)) - out.ResourceError = in.ResourceError - return nil -} - -// Convert_v1alpha1_NnfDataMovementStatus_To_v1alpha4_NnfDataMovementStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfDataMovementStatus_To_v1alpha4_NnfDataMovementStatus(in *NnfDataMovementStatus, out *v1alpha4.NnfDataMovementStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDataMovementStatus_To_v1alpha4_NnfDataMovementStatus(in, out, s) -} - -func autoConvert_v1alpha4_NnfDataMovementStatus_To_v1alpha1_NnfDataMovementStatus(in *v1alpha4.NnfDataMovementStatus, out *NnfDataMovementStatus, s conversion.Scope) error { - out.State = in.State - out.Status = in.Status - out.Message = in.Message - out.StartTime = (*metav1.MicroTime)(unsafe.Pointer(in.StartTime)) - out.EndTime = (*metav1.MicroTime)(unsafe.Pointer(in.EndTime)) - out.Restarts = in.Restarts - out.CommandStatus = (*NnfDataMovementCommandStatus)(unsafe.Pointer(in.CommandStatus)) - out.ResourceError = in.ResourceError - return nil -} - -// Convert_v1alpha4_NnfDataMovementStatus_To_v1alpha1_NnfDataMovementStatus is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfDataMovementStatus_To_v1alpha1_NnfDataMovementStatus(in *v1alpha4.NnfDataMovementStatus, out *NnfDataMovementStatus, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfDataMovementStatus_To_v1alpha1_NnfDataMovementStatus(in, out, s) -} - -func autoConvert_v1alpha1_NnfDriveStatus_To_v1alpha4_NnfDriveStatus(in *NnfDriveStatus, out *v1alpha4.NnfDriveStatus, s conversion.Scope) error { - out.Model = in.Model - out.SerialNumber = in.SerialNumber - out.FirmwareVersion = in.FirmwareVersion - out.Slot = in.Slot - out.Capacity = in.Capacity - out.WearLevel = in.WearLevel - if err := Convert_v1alpha1_NnfResourceStatus_To_v1alpha4_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_NnfDriveStatus_To_v1alpha4_NnfDriveStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfDriveStatus_To_v1alpha4_NnfDriveStatus(in *NnfDriveStatus, out *v1alpha4.NnfDriveStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfDriveStatus_To_v1alpha4_NnfDriveStatus(in, out, s) -} - -func autoConvert_v1alpha4_NnfDriveStatus_To_v1alpha1_NnfDriveStatus(in *v1alpha4.NnfDriveStatus, out *NnfDriveStatus, s conversion.Scope) error { - out.Model = in.Model - out.SerialNumber = in.SerialNumber - out.FirmwareVersion = in.FirmwareVersion - out.Slot = in.Slot - out.Capacity = in.Capacity - out.WearLevel = in.WearLevel - if err := Convert_v1alpha4_NnfResourceStatus_To_v1alpha1_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha4_NnfDriveStatus_To_v1alpha1_NnfDriveStatus is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfDriveStatus_To_v1alpha1_NnfDriveStatus(in *v1alpha4.NnfDriveStatus, out *NnfDriveStatus, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfDriveStatus_To_v1alpha1_NnfDriveStatus(in, out, s) -} - -func autoConvert_v1alpha1_NnfLustreMGT_To_v1alpha4_NnfLustreMGT(in *NnfLustreMGT, out *v1alpha4.NnfLustreMGT, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfLustreMGTSpec_To_v1alpha4_NnfLustreMGTSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha1_NnfLustreMGTStatus_To_v1alpha4_NnfLustreMGTStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_NnfLustreMGT_To_v1alpha4_NnfLustreMGT is an autogenerated conversion function. -func Convert_v1alpha1_NnfLustreMGT_To_v1alpha4_NnfLustreMGT(in *NnfLustreMGT, out *v1alpha4.NnfLustreMGT, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfLustreMGT_To_v1alpha4_NnfLustreMGT(in, out, s) -} - -func autoConvert_v1alpha4_NnfLustreMGT_To_v1alpha1_NnfLustreMGT(in *v1alpha4.NnfLustreMGT, out *NnfLustreMGT, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha4_NnfLustreMGTSpec_To_v1alpha1_NnfLustreMGTSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha4_NnfLustreMGTStatus_To_v1alpha1_NnfLustreMGTStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha4_NnfLustreMGT_To_v1alpha1_NnfLustreMGT is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfLustreMGT_To_v1alpha1_NnfLustreMGT(in *v1alpha4.NnfLustreMGT, out *NnfLustreMGT, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfLustreMGT_To_v1alpha1_NnfLustreMGT(in, out, s) -} - -func autoConvert_v1alpha1_NnfLustreMGTList_To_v1alpha4_NnfLustreMGTList(in *NnfLustreMGTList, out *v1alpha4.NnfLustreMGTList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha4.NnfLustreMGT)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_NnfLustreMGTList_To_v1alpha4_NnfLustreMGTList is an autogenerated conversion function. -func Convert_v1alpha1_NnfLustreMGTList_To_v1alpha4_NnfLustreMGTList(in *NnfLustreMGTList, out *v1alpha4.NnfLustreMGTList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfLustreMGTList_To_v1alpha4_NnfLustreMGTList(in, out, s) -} - -func autoConvert_v1alpha4_NnfLustreMGTList_To_v1alpha1_NnfLustreMGTList(in *v1alpha4.NnfLustreMGTList, out *NnfLustreMGTList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]NnfLustreMGT)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha4_NnfLustreMGTList_To_v1alpha1_NnfLustreMGTList is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfLustreMGTList_To_v1alpha1_NnfLustreMGTList(in *v1alpha4.NnfLustreMGTList, out *NnfLustreMGTList, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfLustreMGTList_To_v1alpha1_NnfLustreMGTList(in, out, s) -} - -func autoConvert_v1alpha1_NnfLustreMGTSpec_To_v1alpha4_NnfLustreMGTSpec(in *NnfLustreMGTSpec, out *v1alpha4.NnfLustreMGTSpec, s conversion.Scope) error { - out.Addresses = *(*[]string)(unsafe.Pointer(&in.Addresses)) - out.FsNameBlackList = *(*[]string)(unsafe.Pointer(&in.FsNameBlackList)) - out.FsNameStart = in.FsNameStart - out.FsNameStartReference = in.FsNameStartReference - out.ClaimList = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.ClaimList)) - return nil -} - -// Convert_v1alpha1_NnfLustreMGTSpec_To_v1alpha4_NnfLustreMGTSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfLustreMGTSpec_To_v1alpha4_NnfLustreMGTSpec(in *NnfLustreMGTSpec, out *v1alpha4.NnfLustreMGTSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfLustreMGTSpec_To_v1alpha4_NnfLustreMGTSpec(in, out, s) -} - -func autoConvert_v1alpha4_NnfLustreMGTSpec_To_v1alpha1_NnfLustreMGTSpec(in *v1alpha4.NnfLustreMGTSpec, out *NnfLustreMGTSpec, s conversion.Scope) error { - out.Addresses = *(*[]string)(unsafe.Pointer(&in.Addresses)) - out.FsNameBlackList = *(*[]string)(unsafe.Pointer(&in.FsNameBlackList)) - out.FsNameStart = in.FsNameStart - out.FsNameStartReference = in.FsNameStartReference - out.ClaimList = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.ClaimList)) - return nil -} - -// Convert_v1alpha4_NnfLustreMGTSpec_To_v1alpha1_NnfLustreMGTSpec is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfLustreMGTSpec_To_v1alpha1_NnfLustreMGTSpec(in *v1alpha4.NnfLustreMGTSpec, out *NnfLustreMGTSpec, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfLustreMGTSpec_To_v1alpha1_NnfLustreMGTSpec(in, out, s) -} - -func autoConvert_v1alpha1_NnfLustreMGTStatus_To_v1alpha4_NnfLustreMGTStatus(in *NnfLustreMGTStatus, out *v1alpha4.NnfLustreMGTStatus, s conversion.Scope) error { - out.FsNameNext = in.FsNameNext - out.ClaimList = *(*[]v1alpha4.NnfLustreMGTStatusClaim)(unsafe.Pointer(&in.ClaimList)) - out.ResourceError = in.ResourceError - return nil -} - -// Convert_v1alpha1_NnfLustreMGTStatus_To_v1alpha4_NnfLustreMGTStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfLustreMGTStatus_To_v1alpha4_NnfLustreMGTStatus(in *NnfLustreMGTStatus, out *v1alpha4.NnfLustreMGTStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfLustreMGTStatus_To_v1alpha4_NnfLustreMGTStatus(in, out, s) -} - -func autoConvert_v1alpha4_NnfLustreMGTStatus_To_v1alpha1_NnfLustreMGTStatus(in *v1alpha4.NnfLustreMGTStatus, out *NnfLustreMGTStatus, s conversion.Scope) error { - out.FsNameNext = in.FsNameNext - out.ClaimList = *(*[]NnfLustreMGTStatusClaim)(unsafe.Pointer(&in.ClaimList)) - out.ResourceError = in.ResourceError - return nil -} - -// Convert_v1alpha4_NnfLustreMGTStatus_To_v1alpha1_NnfLustreMGTStatus is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfLustreMGTStatus_To_v1alpha1_NnfLustreMGTStatus(in *v1alpha4.NnfLustreMGTStatus, out *NnfLustreMGTStatus, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfLustreMGTStatus_To_v1alpha1_NnfLustreMGTStatus(in, out, s) -} - -func autoConvert_v1alpha1_NnfLustreMGTStatusClaim_To_v1alpha4_NnfLustreMGTStatusClaim(in *NnfLustreMGTStatusClaim, out *v1alpha4.NnfLustreMGTStatusClaim, s conversion.Scope) error { - out.Reference = in.Reference - out.FsName = in.FsName - return nil -} - -// Convert_v1alpha1_NnfLustreMGTStatusClaim_To_v1alpha4_NnfLustreMGTStatusClaim is an autogenerated conversion function. -func Convert_v1alpha1_NnfLustreMGTStatusClaim_To_v1alpha4_NnfLustreMGTStatusClaim(in *NnfLustreMGTStatusClaim, out *v1alpha4.NnfLustreMGTStatusClaim, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfLustreMGTStatusClaim_To_v1alpha4_NnfLustreMGTStatusClaim(in, out, s) -} - -func autoConvert_v1alpha4_NnfLustreMGTStatusClaim_To_v1alpha1_NnfLustreMGTStatusClaim(in *v1alpha4.NnfLustreMGTStatusClaim, out *NnfLustreMGTStatusClaim, s conversion.Scope) error { - out.Reference = in.Reference - out.FsName = in.FsName - return nil -} - -// Convert_v1alpha4_NnfLustreMGTStatusClaim_To_v1alpha1_NnfLustreMGTStatusClaim is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfLustreMGTStatusClaim_To_v1alpha1_NnfLustreMGTStatusClaim(in *v1alpha4.NnfLustreMGTStatusClaim, out *NnfLustreMGTStatusClaim, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfLustreMGTStatusClaim_To_v1alpha1_NnfLustreMGTStatusClaim(in, out, s) -} - -func autoConvert_v1alpha1_NnfNode_To_v1alpha4_NnfNode(in *NnfNode, out *v1alpha4.NnfNode, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfNodeSpec_To_v1alpha4_NnfNodeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha1_NnfNodeStatus_To_v1alpha4_NnfNodeStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_NnfNode_To_v1alpha4_NnfNode is an autogenerated conversion function. -func Convert_v1alpha1_NnfNode_To_v1alpha4_NnfNode(in *NnfNode, out *v1alpha4.NnfNode, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNode_To_v1alpha4_NnfNode(in, out, s) -} - -func autoConvert_v1alpha4_NnfNode_To_v1alpha1_NnfNode(in *v1alpha4.NnfNode, out *NnfNode, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha4_NnfNodeSpec_To_v1alpha1_NnfNodeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha4_NnfNodeStatus_To_v1alpha1_NnfNodeStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha4_NnfNode_To_v1alpha1_NnfNode is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfNode_To_v1alpha1_NnfNode(in *v1alpha4.NnfNode, out *NnfNode, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfNode_To_v1alpha1_NnfNode(in, out, s) -} - -func autoConvert_v1alpha1_NnfNodeBlockStorage_To_v1alpha4_NnfNodeBlockStorage(in *NnfNodeBlockStorage, out *v1alpha4.NnfNodeBlockStorage, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfNodeBlockStorageSpec_To_v1alpha4_NnfNodeBlockStorageSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha1_NnfNodeBlockStorageStatus_To_v1alpha4_NnfNodeBlockStorageStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_NnfNodeBlockStorage_To_v1alpha4_NnfNodeBlockStorage is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeBlockStorage_To_v1alpha4_NnfNodeBlockStorage(in *NnfNodeBlockStorage, out *v1alpha4.NnfNodeBlockStorage, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeBlockStorage_To_v1alpha4_NnfNodeBlockStorage(in, out, s) -} - -func autoConvert_v1alpha4_NnfNodeBlockStorage_To_v1alpha1_NnfNodeBlockStorage(in *v1alpha4.NnfNodeBlockStorage, out *NnfNodeBlockStorage, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha4_NnfNodeBlockStorageSpec_To_v1alpha1_NnfNodeBlockStorageSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha4_NnfNodeBlockStorageStatus_To_v1alpha1_NnfNodeBlockStorageStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha4_NnfNodeBlockStorage_To_v1alpha1_NnfNodeBlockStorage is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfNodeBlockStorage_To_v1alpha1_NnfNodeBlockStorage(in *v1alpha4.NnfNodeBlockStorage, out *NnfNodeBlockStorage, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfNodeBlockStorage_To_v1alpha1_NnfNodeBlockStorage(in, out, s) -} - -func autoConvert_v1alpha1_NnfNodeBlockStorageAccessStatus_To_v1alpha4_NnfNodeBlockStorageAccessStatus(in *NnfNodeBlockStorageAccessStatus, out *v1alpha4.NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { - out.DevicePaths = *(*[]string)(unsafe.Pointer(&in.DevicePaths)) - out.StorageGroupId = in.StorageGroupId - return nil -} - -// Convert_v1alpha1_NnfNodeBlockStorageAccessStatus_To_v1alpha4_NnfNodeBlockStorageAccessStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeBlockStorageAccessStatus_To_v1alpha4_NnfNodeBlockStorageAccessStatus(in *NnfNodeBlockStorageAccessStatus, out *v1alpha4.NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeBlockStorageAccessStatus_To_v1alpha4_NnfNodeBlockStorageAccessStatus(in, out, s) -} - -func autoConvert_v1alpha4_NnfNodeBlockStorageAccessStatus_To_v1alpha1_NnfNodeBlockStorageAccessStatus(in *v1alpha4.NnfNodeBlockStorageAccessStatus, out *NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { - out.DevicePaths = *(*[]string)(unsafe.Pointer(&in.DevicePaths)) - out.StorageGroupId = in.StorageGroupId - return nil -} - -// Convert_v1alpha4_NnfNodeBlockStorageAccessStatus_To_v1alpha1_NnfNodeBlockStorageAccessStatus is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfNodeBlockStorageAccessStatus_To_v1alpha1_NnfNodeBlockStorageAccessStatus(in *v1alpha4.NnfNodeBlockStorageAccessStatus, out *NnfNodeBlockStorageAccessStatus, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfNodeBlockStorageAccessStatus_To_v1alpha1_NnfNodeBlockStorageAccessStatus(in, out, s) -} - -func autoConvert_v1alpha1_NnfNodeBlockStorageAllocationSpec_To_v1alpha4_NnfNodeBlockStorageAllocationSpec(in *NnfNodeBlockStorageAllocationSpec, out *v1alpha4.NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { - out.Capacity = in.Capacity - out.Access = *(*[]string)(unsafe.Pointer(&in.Access)) - return nil -} - -// Convert_v1alpha1_NnfNodeBlockStorageAllocationSpec_To_v1alpha4_NnfNodeBlockStorageAllocationSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeBlockStorageAllocationSpec_To_v1alpha4_NnfNodeBlockStorageAllocationSpec(in *NnfNodeBlockStorageAllocationSpec, out *v1alpha4.NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeBlockStorageAllocationSpec_To_v1alpha4_NnfNodeBlockStorageAllocationSpec(in, out, s) -} - -func autoConvert_v1alpha4_NnfNodeBlockStorageAllocationSpec_To_v1alpha1_NnfNodeBlockStorageAllocationSpec(in *v1alpha4.NnfNodeBlockStorageAllocationSpec, out *NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { - out.Capacity = in.Capacity - out.Access = *(*[]string)(unsafe.Pointer(&in.Access)) - return nil -} - -// Convert_v1alpha4_NnfNodeBlockStorageAllocationSpec_To_v1alpha1_NnfNodeBlockStorageAllocationSpec is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfNodeBlockStorageAllocationSpec_To_v1alpha1_NnfNodeBlockStorageAllocationSpec(in *v1alpha4.NnfNodeBlockStorageAllocationSpec, out *NnfNodeBlockStorageAllocationSpec, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfNodeBlockStorageAllocationSpec_To_v1alpha1_NnfNodeBlockStorageAllocationSpec(in, out, s) -} - -func autoConvert_v1alpha1_NnfNodeBlockStorageAllocationStatus_To_v1alpha4_NnfNodeBlockStorageAllocationStatus(in *NnfNodeBlockStorageAllocationStatus, out *v1alpha4.NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { - out.Accesses = *(*map[string]v1alpha4.NnfNodeBlockStorageAccessStatus)(unsafe.Pointer(&in.Accesses)) - out.Devices = *(*[]v1alpha4.NnfNodeBlockStorageDeviceStatus)(unsafe.Pointer(&in.Devices)) - out.CapacityAllocated = in.CapacityAllocated - out.StoragePoolId = in.StoragePoolId - return nil -} - -// Convert_v1alpha1_NnfNodeBlockStorageAllocationStatus_To_v1alpha4_NnfNodeBlockStorageAllocationStatus is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfNodeBlockStorageAllocationStatus_To_v1alpha4_NnfNodeBlockStorageAllocationStatus(in *NnfNodeBlockStorageAllocationStatus, out *v1alpha4.NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeBlockStorageAllocationStatus_To_v1alpha4_NnfNodeBlockStorageAllocationStatus(in, out, s) -} - -func autoConvert_v1alpha4_NnfNodeBlockStorageAllocationStatus_To_v1alpha1_NnfNodeBlockStorageAllocationStatus(in *v1alpha4.NnfNodeBlockStorageAllocationStatus, out *NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { - out.Accesses = *(*map[string]NnfNodeBlockStorageAccessStatus)(unsafe.Pointer(&in.Accesses)) - out.Devices = *(*[]NnfNodeBlockStorageDeviceStatus)(unsafe.Pointer(&in.Devices)) - out.CapacityAllocated = in.CapacityAllocated - out.StoragePoolId = in.StoragePoolId - return nil -} - -// Convert_v1alpha4_NnfNodeBlockStorageAllocationStatus_To_v1alpha1_NnfNodeBlockStorageAllocationStatus is an autogenerated conversion function. -func Convert_v1alpha4_NnfNodeBlockStorageAllocationStatus_To_v1alpha1_NnfNodeBlockStorageAllocationStatus(in *v1alpha4.NnfNodeBlockStorageAllocationStatus, out *NnfNodeBlockStorageAllocationStatus, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfNodeBlockStorageAllocationStatus_To_v1alpha1_NnfNodeBlockStorageAllocationStatus(in, out, s) -} - -func autoConvert_v1alpha1_NnfNodeBlockStorageDeviceStatus_To_v1alpha4_NnfNodeBlockStorageDeviceStatus(in *NnfNodeBlockStorageDeviceStatus, out *v1alpha4.NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { - out.NQN = in.NQN - out.NamespaceId = in.NamespaceId - out.CapacityAllocated = in.CapacityAllocated - return nil -} - -// Convert_v1alpha1_NnfNodeBlockStorageDeviceStatus_To_v1alpha4_NnfNodeBlockStorageDeviceStatus is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfNodeBlockStorageDeviceStatus_To_v1alpha4_NnfNodeBlockStorageDeviceStatus(in *NnfNodeBlockStorageDeviceStatus, out *v1alpha4.NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeBlockStorageDeviceStatus_To_v1alpha4_NnfNodeBlockStorageDeviceStatus(in, out, s) -} - -func autoConvert_v1alpha4_NnfNodeBlockStorageDeviceStatus_To_v1alpha1_NnfNodeBlockStorageDeviceStatus(in *v1alpha4.NnfNodeBlockStorageDeviceStatus, out *NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { - out.NQN = in.NQN - out.NamespaceId = in.NamespaceId - out.CapacityAllocated = in.CapacityAllocated - return nil -} - -// Convert_v1alpha4_NnfNodeBlockStorageDeviceStatus_To_v1alpha1_NnfNodeBlockStorageDeviceStatus is an autogenerated conversion function. -func Convert_v1alpha4_NnfNodeBlockStorageDeviceStatus_To_v1alpha1_NnfNodeBlockStorageDeviceStatus(in *v1alpha4.NnfNodeBlockStorageDeviceStatus, out *NnfNodeBlockStorageDeviceStatus, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfNodeBlockStorageDeviceStatus_To_v1alpha1_NnfNodeBlockStorageDeviceStatus(in, out, s) -} - -func autoConvert_v1alpha1_NnfNodeBlockStorageList_To_v1alpha4_NnfNodeBlockStorageList(in *NnfNodeBlockStorageList, out *v1alpha4.NnfNodeBlockStorageList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha4.NnfNodeBlockStorage)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_NnfNodeBlockStorageList_To_v1alpha4_NnfNodeBlockStorageList is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfNodeBlockStorageList_To_v1alpha4_NnfNodeBlockStorageList(in *NnfNodeBlockStorageList, out *v1alpha4.NnfNodeBlockStorageList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeBlockStorageList_To_v1alpha4_NnfNodeBlockStorageList(in, out, s) -} - -func autoConvert_v1alpha4_NnfNodeBlockStorageList_To_v1alpha1_NnfNodeBlockStorageList(in *v1alpha4.NnfNodeBlockStorageList, out *NnfNodeBlockStorageList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]NnfNodeBlockStorage)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha4_NnfNodeBlockStorageList_To_v1alpha1_NnfNodeBlockStorageList is an autogenerated conversion function. -func Convert_v1alpha4_NnfNodeBlockStorageList_To_v1alpha1_NnfNodeBlockStorageList(in *v1alpha4.NnfNodeBlockStorageList, out *NnfNodeBlockStorageList, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfNodeBlockStorageList_To_v1alpha1_NnfNodeBlockStorageList(in, out, s) -} - -func autoConvert_v1alpha1_NnfNodeBlockStorageSpec_To_v1alpha4_NnfNodeBlockStorageSpec(in *NnfNodeBlockStorageSpec, out *v1alpha4.NnfNodeBlockStorageSpec, s conversion.Scope) error { - out.SharedAllocation = in.SharedAllocation - out.Allocations = *(*[]v1alpha4.NnfNodeBlockStorageAllocationSpec)(unsafe.Pointer(&in.Allocations)) - return nil -} - -// Convert_v1alpha1_NnfNodeBlockStorageSpec_To_v1alpha4_NnfNodeBlockStorageSpec is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfNodeBlockStorageSpec_To_v1alpha4_NnfNodeBlockStorageSpec(in *NnfNodeBlockStorageSpec, out *v1alpha4.NnfNodeBlockStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeBlockStorageSpec_To_v1alpha4_NnfNodeBlockStorageSpec(in, out, s) -} - -func autoConvert_v1alpha4_NnfNodeBlockStorageSpec_To_v1alpha1_NnfNodeBlockStorageSpec(in *v1alpha4.NnfNodeBlockStorageSpec, out *NnfNodeBlockStorageSpec, s conversion.Scope) error { - out.SharedAllocation = in.SharedAllocation - out.Allocations = *(*[]NnfNodeBlockStorageAllocationSpec)(unsafe.Pointer(&in.Allocations)) - return nil -} - -// Convert_v1alpha4_NnfNodeBlockStorageSpec_To_v1alpha1_NnfNodeBlockStorageSpec is an autogenerated conversion function. -func Convert_v1alpha4_NnfNodeBlockStorageSpec_To_v1alpha1_NnfNodeBlockStorageSpec(in *v1alpha4.NnfNodeBlockStorageSpec, out *NnfNodeBlockStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfNodeBlockStorageSpec_To_v1alpha1_NnfNodeBlockStorageSpec(in, out, s) -} - -func autoConvert_v1alpha1_NnfNodeBlockStorageStatus_To_v1alpha4_NnfNodeBlockStorageStatus(in *NnfNodeBlockStorageStatus, out *v1alpha4.NnfNodeBlockStorageStatus, s conversion.Scope) error { - out.Allocations = *(*[]v1alpha4.NnfNodeBlockStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) - out.ResourceError = in.ResourceError - out.PodStartTime = in.PodStartTime - out.Ready = in.Ready - return nil -} - -// Convert_v1alpha1_NnfNodeBlockStorageStatus_To_v1alpha4_NnfNodeBlockStorageStatus is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfNodeBlockStorageStatus_To_v1alpha4_NnfNodeBlockStorageStatus(in *NnfNodeBlockStorageStatus, out *v1alpha4.NnfNodeBlockStorageStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeBlockStorageStatus_To_v1alpha4_NnfNodeBlockStorageStatus(in, out, s) -} - -func autoConvert_v1alpha4_NnfNodeBlockStorageStatus_To_v1alpha1_NnfNodeBlockStorageStatus(in *v1alpha4.NnfNodeBlockStorageStatus, out *NnfNodeBlockStorageStatus, s conversion.Scope) error { - out.Allocations = *(*[]NnfNodeBlockStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) - out.ResourceError = in.ResourceError - out.PodStartTime = in.PodStartTime - out.Ready = in.Ready - return nil -} - -// Convert_v1alpha4_NnfNodeBlockStorageStatus_To_v1alpha1_NnfNodeBlockStorageStatus is an autogenerated conversion function. -func Convert_v1alpha4_NnfNodeBlockStorageStatus_To_v1alpha1_NnfNodeBlockStorageStatus(in *v1alpha4.NnfNodeBlockStorageStatus, out *NnfNodeBlockStorageStatus, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfNodeBlockStorageStatus_To_v1alpha1_NnfNodeBlockStorageStatus(in, out, s) -} - -func autoConvert_v1alpha1_NnfNodeECData_To_v1alpha4_NnfNodeECData(in *NnfNodeECData, out *v1alpha4.NnfNodeECData, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfNodeECDataSpec_To_v1alpha4_NnfNodeECDataSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha1_NnfNodeECDataStatus_To_v1alpha4_NnfNodeECDataStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_NnfNodeECData_To_v1alpha4_NnfNodeECData is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfNodeECData_To_v1alpha4_NnfNodeECData(in *NnfNodeECData, out *v1alpha4.NnfNodeECData, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeECData_To_v1alpha4_NnfNodeECData(in, out, s) -} - -func autoConvert_v1alpha4_NnfNodeECData_To_v1alpha1_NnfNodeECData(in *v1alpha4.NnfNodeECData, out *NnfNodeECData, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha4_NnfNodeECDataSpec_To_v1alpha1_NnfNodeECDataSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha4_NnfNodeECDataStatus_To_v1alpha1_NnfNodeECDataStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha4_NnfNodeECData_To_v1alpha1_NnfNodeECData is an autogenerated conversion function. -func Convert_v1alpha4_NnfNodeECData_To_v1alpha1_NnfNodeECData(in *v1alpha4.NnfNodeECData, out *NnfNodeECData, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfNodeECData_To_v1alpha1_NnfNodeECData(in, out, s) -} - -func autoConvert_v1alpha1_NnfNodeECDataList_To_v1alpha4_NnfNodeECDataList(in *NnfNodeECDataList, out *v1alpha4.NnfNodeECDataList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha4.NnfNodeECData)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_NnfNodeECDataList_To_v1alpha4_NnfNodeECDataList is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfNodeECDataList_To_v1alpha4_NnfNodeECDataList(in *NnfNodeECDataList, out *v1alpha4.NnfNodeECDataList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeECDataList_To_v1alpha4_NnfNodeECDataList(in, out, s) -} - -func autoConvert_v1alpha4_NnfNodeECDataList_To_v1alpha1_NnfNodeECDataList(in *v1alpha4.NnfNodeECDataList, out *NnfNodeECDataList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]NnfNodeECData)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha4_NnfNodeECDataList_To_v1alpha1_NnfNodeECDataList is an autogenerated conversion function. -func Convert_v1alpha4_NnfNodeECDataList_To_v1alpha1_NnfNodeECDataList(in *v1alpha4.NnfNodeECDataList, out *NnfNodeECDataList, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfNodeECDataList_To_v1alpha1_NnfNodeECDataList(in, out, s) -} - -func autoConvert_v1alpha1_NnfNodeECDataSpec_To_v1alpha4_NnfNodeECDataSpec(in *NnfNodeECDataSpec, out *v1alpha4.NnfNodeECDataSpec, s conversion.Scope) error { - return nil -} - -// Convert_v1alpha1_NnfNodeECDataSpec_To_v1alpha4_NnfNodeECDataSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeECDataSpec_To_v1alpha4_NnfNodeECDataSpec(in *NnfNodeECDataSpec, out *v1alpha4.NnfNodeECDataSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeECDataSpec_To_v1alpha4_NnfNodeECDataSpec(in, out, s) -} - -func autoConvert_v1alpha4_NnfNodeECDataSpec_To_v1alpha1_NnfNodeECDataSpec(in *v1alpha4.NnfNodeECDataSpec, out *NnfNodeECDataSpec, s conversion.Scope) error { - return nil -} - -// Convert_v1alpha4_NnfNodeECDataSpec_To_v1alpha1_NnfNodeECDataSpec is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfNodeECDataSpec_To_v1alpha1_NnfNodeECDataSpec(in *v1alpha4.NnfNodeECDataSpec, out *NnfNodeECDataSpec, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfNodeECDataSpec_To_v1alpha1_NnfNodeECDataSpec(in, out, s) -} - -func autoConvert_v1alpha1_NnfNodeECDataStatus_To_v1alpha4_NnfNodeECDataStatus(in *NnfNodeECDataStatus, out *v1alpha4.NnfNodeECDataStatus, s conversion.Scope) error { - out.Data = *(*map[string]v1alpha4.NnfNodeECPrivateData)(unsafe.Pointer(&in.Data)) - return nil -} - -// Convert_v1alpha1_NnfNodeECDataStatus_To_v1alpha4_NnfNodeECDataStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeECDataStatus_To_v1alpha4_NnfNodeECDataStatus(in *NnfNodeECDataStatus, out *v1alpha4.NnfNodeECDataStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeECDataStatus_To_v1alpha4_NnfNodeECDataStatus(in, out, s) -} - -func autoConvert_v1alpha4_NnfNodeECDataStatus_To_v1alpha1_NnfNodeECDataStatus(in *v1alpha4.NnfNodeECDataStatus, out *NnfNodeECDataStatus, s conversion.Scope) error { - out.Data = *(*map[string]NnfNodeECPrivateData)(unsafe.Pointer(&in.Data)) - return nil -} - -// Convert_v1alpha4_NnfNodeECDataStatus_To_v1alpha1_NnfNodeECDataStatus is an autogenerated conversion function. -func Convert_v1alpha4_NnfNodeECDataStatus_To_v1alpha1_NnfNodeECDataStatus(in *v1alpha4.NnfNodeECDataStatus, out *NnfNodeECDataStatus, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfNodeECDataStatus_To_v1alpha1_NnfNodeECDataStatus(in, out, s) -} - -func autoConvert_v1alpha1_NnfNodeList_To_v1alpha4_NnfNodeList(in *NnfNodeList, out *v1alpha4.NnfNodeList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha4.NnfNode)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_NnfNodeList_To_v1alpha4_NnfNodeList is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfNodeList_To_v1alpha4_NnfNodeList(in *NnfNodeList, out *v1alpha4.NnfNodeList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeList_To_v1alpha4_NnfNodeList(in, out, s) -} - -func autoConvert_v1alpha4_NnfNodeList_To_v1alpha1_NnfNodeList(in *v1alpha4.NnfNodeList, out *NnfNodeList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]NnfNode)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha4_NnfNodeList_To_v1alpha1_NnfNodeList is an autogenerated conversion function. -func Convert_v1alpha4_NnfNodeList_To_v1alpha1_NnfNodeList(in *v1alpha4.NnfNodeList, out *NnfNodeList, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfNodeList_To_v1alpha1_NnfNodeList(in, out, s) -} - -func autoConvert_v1alpha1_NnfNodeSpec_To_v1alpha4_NnfNodeSpec(in *NnfNodeSpec, out *v1alpha4.NnfNodeSpec, s conversion.Scope) error { - out.Name = in.Name - out.Pod = in.Pod - out.State = v1alpha4.NnfResourceStateType(in.State) - return nil -} - -// Convert_v1alpha1_NnfNodeSpec_To_v1alpha4_NnfNodeSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeSpec_To_v1alpha4_NnfNodeSpec(in *NnfNodeSpec, out *v1alpha4.NnfNodeSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeSpec_To_v1alpha4_NnfNodeSpec(in, out, s) -} - -func autoConvert_v1alpha4_NnfNodeSpec_To_v1alpha1_NnfNodeSpec(in *v1alpha4.NnfNodeSpec, out *NnfNodeSpec, s conversion.Scope) error { - out.Name = in.Name - out.Pod = in.Pod - out.State = NnfResourceStateType(in.State) - return nil -} - -// Convert_v1alpha4_NnfNodeSpec_To_v1alpha1_NnfNodeSpec is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfNodeSpec_To_v1alpha1_NnfNodeSpec(in *v1alpha4.NnfNodeSpec, out *NnfNodeSpec, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfNodeSpec_To_v1alpha1_NnfNodeSpec(in, out, s) -} - -func autoConvert_v1alpha1_NnfNodeStatus_To_v1alpha4_NnfNodeStatus(in *NnfNodeStatus, out *v1alpha4.NnfNodeStatus, s conversion.Scope) error { - out.Status = v1alpha4.NnfResourceStatusType(in.Status) - out.Health = v1alpha4.NnfResourceHealthType(in.Health) - out.Fenced = in.Fenced - out.LNetNid = in.LNetNid - out.Capacity = in.Capacity - out.CapacityAllocated = in.CapacityAllocated - out.Servers = *(*[]v1alpha4.NnfServerStatus)(unsafe.Pointer(&in.Servers)) - out.Drives = *(*[]v1alpha4.NnfDriveStatus)(unsafe.Pointer(&in.Drives)) - return nil -} - -// Convert_v1alpha1_NnfNodeStatus_To_v1alpha4_NnfNodeStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeStatus_To_v1alpha4_NnfNodeStatus(in *NnfNodeStatus, out *v1alpha4.NnfNodeStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeStatus_To_v1alpha4_NnfNodeStatus(in, out, s) -} - -func autoConvert_v1alpha4_NnfNodeStatus_To_v1alpha1_NnfNodeStatus(in *v1alpha4.NnfNodeStatus, out *NnfNodeStatus, s conversion.Scope) error { - out.Status = NnfResourceStatusType(in.Status) - out.Health = NnfResourceHealthType(in.Health) - out.Fenced = in.Fenced - out.LNetNid = in.LNetNid - out.Capacity = in.Capacity - out.CapacityAllocated = in.CapacityAllocated - out.Servers = *(*[]NnfServerStatus)(unsafe.Pointer(&in.Servers)) - out.Drives = *(*[]NnfDriveStatus)(unsafe.Pointer(&in.Drives)) - return nil -} - -// Convert_v1alpha4_NnfNodeStatus_To_v1alpha1_NnfNodeStatus is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfNodeStatus_To_v1alpha1_NnfNodeStatus(in *v1alpha4.NnfNodeStatus, out *NnfNodeStatus, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfNodeStatus_To_v1alpha1_NnfNodeStatus(in, out, s) -} - -func autoConvert_v1alpha1_NnfNodeStorage_To_v1alpha4_NnfNodeStorage(in *NnfNodeStorage, out *v1alpha4.NnfNodeStorage, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfNodeStorageSpec_To_v1alpha4_NnfNodeStorageSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha1_NnfNodeStorageStatus_To_v1alpha4_NnfNodeStorageStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_NnfNodeStorage_To_v1alpha4_NnfNodeStorage is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeStorage_To_v1alpha4_NnfNodeStorage(in *NnfNodeStorage, out *v1alpha4.NnfNodeStorage, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeStorage_To_v1alpha4_NnfNodeStorage(in, out, s) -} - -func autoConvert_v1alpha4_NnfNodeStorage_To_v1alpha1_NnfNodeStorage(in *v1alpha4.NnfNodeStorage, out *NnfNodeStorage, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha4_NnfNodeStorageSpec_To_v1alpha1_NnfNodeStorageSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha4_NnfNodeStorageStatus_To_v1alpha1_NnfNodeStorageStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha4_NnfNodeStorage_To_v1alpha1_NnfNodeStorage is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfNodeStorage_To_v1alpha1_NnfNodeStorage(in *v1alpha4.NnfNodeStorage, out *NnfNodeStorage, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfNodeStorage_To_v1alpha1_NnfNodeStorage(in, out, s) -} - -func autoConvert_v1alpha1_NnfNodeStorageAllocationStatus_To_v1alpha4_NnfNodeStorageAllocationStatus(in *NnfNodeStorageAllocationStatus, out *v1alpha4.NnfNodeStorageAllocationStatus, s conversion.Scope) error { - out.VolumeGroup = in.VolumeGroup - out.LogicalVolume = in.LogicalVolume - out.Ready = in.Ready - return nil -} - -// Convert_v1alpha1_NnfNodeStorageAllocationStatus_To_v1alpha4_NnfNodeStorageAllocationStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeStorageAllocationStatus_To_v1alpha4_NnfNodeStorageAllocationStatus(in *NnfNodeStorageAllocationStatus, out *v1alpha4.NnfNodeStorageAllocationStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeStorageAllocationStatus_To_v1alpha4_NnfNodeStorageAllocationStatus(in, out, s) -} - -func autoConvert_v1alpha4_NnfNodeStorageAllocationStatus_To_v1alpha1_NnfNodeStorageAllocationStatus(in *v1alpha4.NnfNodeStorageAllocationStatus, out *NnfNodeStorageAllocationStatus, s conversion.Scope) error { - out.VolumeGroup = in.VolumeGroup - out.LogicalVolume = in.LogicalVolume - out.Ready = in.Ready - return nil -} - -// Convert_v1alpha4_NnfNodeStorageAllocationStatus_To_v1alpha1_NnfNodeStorageAllocationStatus is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfNodeStorageAllocationStatus_To_v1alpha1_NnfNodeStorageAllocationStatus(in *v1alpha4.NnfNodeStorageAllocationStatus, out *NnfNodeStorageAllocationStatus, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfNodeStorageAllocationStatus_To_v1alpha1_NnfNodeStorageAllocationStatus(in, out, s) -} - -func autoConvert_v1alpha1_NnfNodeStorageList_To_v1alpha4_NnfNodeStorageList(in *NnfNodeStorageList, out *v1alpha4.NnfNodeStorageList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha4.NnfNodeStorage)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_NnfNodeStorageList_To_v1alpha4_NnfNodeStorageList is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeStorageList_To_v1alpha4_NnfNodeStorageList(in *NnfNodeStorageList, out *v1alpha4.NnfNodeStorageList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeStorageList_To_v1alpha4_NnfNodeStorageList(in, out, s) -} - -func autoConvert_v1alpha4_NnfNodeStorageList_To_v1alpha1_NnfNodeStorageList(in *v1alpha4.NnfNodeStorageList, out *NnfNodeStorageList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]NnfNodeStorage)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha4_NnfNodeStorageList_To_v1alpha1_NnfNodeStorageList is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfNodeStorageList_To_v1alpha1_NnfNodeStorageList(in *v1alpha4.NnfNodeStorageList, out *NnfNodeStorageList, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfNodeStorageList_To_v1alpha1_NnfNodeStorageList(in, out, s) -} - -func autoConvert_v1alpha1_NnfNodeStorageSpec_To_v1alpha4_NnfNodeStorageSpec(in *NnfNodeStorageSpec, out *v1alpha4.NnfNodeStorageSpec, s conversion.Scope) error { - out.Count = in.Count - out.SharedAllocation = in.SharedAllocation - out.Capacity = in.Capacity - out.UserID = in.UserID - out.GroupID = in.GroupID - out.FileSystemType = in.FileSystemType - if err := Convert_v1alpha1_LustreStorageSpec_To_v1alpha4_LustreStorageSpec(&in.LustreStorage, &out.LustreStorage, s); err != nil { - return err - } - out.BlockReference = in.BlockReference - return nil -} - -// Convert_v1alpha1_NnfNodeStorageSpec_To_v1alpha4_NnfNodeStorageSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeStorageSpec_To_v1alpha4_NnfNodeStorageSpec(in *NnfNodeStorageSpec, out *v1alpha4.NnfNodeStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeStorageSpec_To_v1alpha4_NnfNodeStorageSpec(in, out, s) -} - -func autoConvert_v1alpha4_NnfNodeStorageSpec_To_v1alpha1_NnfNodeStorageSpec(in *v1alpha4.NnfNodeStorageSpec, out *NnfNodeStorageSpec, s conversion.Scope) error { - out.Count = in.Count - out.SharedAllocation = in.SharedAllocation - out.Capacity = in.Capacity - out.UserID = in.UserID - out.GroupID = in.GroupID - out.FileSystemType = in.FileSystemType - if err := Convert_v1alpha4_LustreStorageSpec_To_v1alpha1_LustreStorageSpec(&in.LustreStorage, &out.LustreStorage, s); err != nil { - return err - } - out.BlockReference = in.BlockReference - return nil -} - -// Convert_v1alpha4_NnfNodeStorageSpec_To_v1alpha1_NnfNodeStorageSpec is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfNodeStorageSpec_To_v1alpha1_NnfNodeStorageSpec(in *v1alpha4.NnfNodeStorageSpec, out *NnfNodeStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfNodeStorageSpec_To_v1alpha1_NnfNodeStorageSpec(in, out, s) -} - -func autoConvert_v1alpha1_NnfNodeStorageStatus_To_v1alpha4_NnfNodeStorageStatus(in *NnfNodeStorageStatus, out *v1alpha4.NnfNodeStorageStatus, s conversion.Scope) error { - out.Allocations = *(*[]v1alpha4.NnfNodeStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) - out.Ready = in.Ready - out.ResourceError = in.ResourceError - return nil -} - -// Convert_v1alpha1_NnfNodeStorageStatus_To_v1alpha4_NnfNodeStorageStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfNodeStorageStatus_To_v1alpha4_NnfNodeStorageStatus(in *NnfNodeStorageStatus, out *v1alpha4.NnfNodeStorageStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfNodeStorageStatus_To_v1alpha4_NnfNodeStorageStatus(in, out, s) -} - -func autoConvert_v1alpha4_NnfNodeStorageStatus_To_v1alpha1_NnfNodeStorageStatus(in *v1alpha4.NnfNodeStorageStatus, out *NnfNodeStorageStatus, s conversion.Scope) error { - out.Allocations = *(*[]NnfNodeStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) - out.Ready = in.Ready - out.ResourceError = in.ResourceError - return nil -} - -// Convert_v1alpha4_NnfNodeStorageStatus_To_v1alpha1_NnfNodeStorageStatus is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfNodeStorageStatus_To_v1alpha1_NnfNodeStorageStatus(in *v1alpha4.NnfNodeStorageStatus, out *NnfNodeStorageStatus, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfNodeStorageStatus_To_v1alpha1_NnfNodeStorageStatus(in, out, s) -} - -func autoConvert_v1alpha1_NnfPortManager_To_v1alpha4_NnfPortManager(in *NnfPortManager, out *v1alpha4.NnfPortManager, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfPortManagerSpec_To_v1alpha4_NnfPortManagerSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha1_NnfPortManagerStatus_To_v1alpha4_NnfPortManagerStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_NnfPortManager_To_v1alpha4_NnfPortManager is an autogenerated conversion function. -func Convert_v1alpha1_NnfPortManager_To_v1alpha4_NnfPortManager(in *NnfPortManager, out *v1alpha4.NnfPortManager, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfPortManager_To_v1alpha4_NnfPortManager(in, out, s) -} - -func autoConvert_v1alpha4_NnfPortManager_To_v1alpha1_NnfPortManager(in *v1alpha4.NnfPortManager, out *NnfPortManager, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha4_NnfPortManagerSpec_To_v1alpha1_NnfPortManagerSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha4_NnfPortManagerStatus_To_v1alpha1_NnfPortManagerStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha4_NnfPortManager_To_v1alpha1_NnfPortManager is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfPortManager_To_v1alpha1_NnfPortManager(in *v1alpha4.NnfPortManager, out *NnfPortManager, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfPortManager_To_v1alpha1_NnfPortManager(in, out, s) -} - -func autoConvert_v1alpha1_NnfPortManagerAllocationSpec_To_v1alpha4_NnfPortManagerAllocationSpec(in *NnfPortManagerAllocationSpec, out *v1alpha4.NnfPortManagerAllocationSpec, s conversion.Scope) error { - out.Requester = in.Requester - out.Count = in.Count - return nil -} - -// Convert_v1alpha1_NnfPortManagerAllocationSpec_To_v1alpha4_NnfPortManagerAllocationSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfPortManagerAllocationSpec_To_v1alpha4_NnfPortManagerAllocationSpec(in *NnfPortManagerAllocationSpec, out *v1alpha4.NnfPortManagerAllocationSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfPortManagerAllocationSpec_To_v1alpha4_NnfPortManagerAllocationSpec(in, out, s) -} - -func autoConvert_v1alpha4_NnfPortManagerAllocationSpec_To_v1alpha1_NnfPortManagerAllocationSpec(in *v1alpha4.NnfPortManagerAllocationSpec, out *NnfPortManagerAllocationSpec, s conversion.Scope) error { - out.Requester = in.Requester - out.Count = in.Count - return nil -} - -// Convert_v1alpha4_NnfPortManagerAllocationSpec_To_v1alpha1_NnfPortManagerAllocationSpec is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfPortManagerAllocationSpec_To_v1alpha1_NnfPortManagerAllocationSpec(in *v1alpha4.NnfPortManagerAllocationSpec, out *NnfPortManagerAllocationSpec, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfPortManagerAllocationSpec_To_v1alpha1_NnfPortManagerAllocationSpec(in, out, s) -} - -func autoConvert_v1alpha1_NnfPortManagerAllocationStatus_To_v1alpha4_NnfPortManagerAllocationStatus(in *NnfPortManagerAllocationStatus, out *v1alpha4.NnfPortManagerAllocationStatus, s conversion.Scope) error { - out.Requester = (*v1.ObjectReference)(unsafe.Pointer(in.Requester)) - out.Ports = *(*[]uint16)(unsafe.Pointer(&in.Ports)) - out.Status = v1alpha4.NnfPortManagerAllocationStatusStatus(in.Status) - out.TimeUnallocated = (*metav1.Time)(unsafe.Pointer(in.TimeUnallocated)) - return nil -} - -// Convert_v1alpha1_NnfPortManagerAllocationStatus_To_v1alpha4_NnfPortManagerAllocationStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfPortManagerAllocationStatus_To_v1alpha4_NnfPortManagerAllocationStatus(in *NnfPortManagerAllocationStatus, out *v1alpha4.NnfPortManagerAllocationStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfPortManagerAllocationStatus_To_v1alpha4_NnfPortManagerAllocationStatus(in, out, s) -} - -func autoConvert_v1alpha4_NnfPortManagerAllocationStatus_To_v1alpha1_NnfPortManagerAllocationStatus(in *v1alpha4.NnfPortManagerAllocationStatus, out *NnfPortManagerAllocationStatus, s conversion.Scope) error { - out.Requester = (*v1.ObjectReference)(unsafe.Pointer(in.Requester)) - out.Ports = *(*[]uint16)(unsafe.Pointer(&in.Ports)) - out.Status = NnfPortManagerAllocationStatusStatus(in.Status) - out.TimeUnallocated = (*metav1.Time)(unsafe.Pointer(in.TimeUnallocated)) - return nil -} - -// Convert_v1alpha4_NnfPortManagerAllocationStatus_To_v1alpha1_NnfPortManagerAllocationStatus is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfPortManagerAllocationStatus_To_v1alpha1_NnfPortManagerAllocationStatus(in *v1alpha4.NnfPortManagerAllocationStatus, out *NnfPortManagerAllocationStatus, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfPortManagerAllocationStatus_To_v1alpha1_NnfPortManagerAllocationStatus(in, out, s) -} - -func autoConvert_v1alpha1_NnfPortManagerList_To_v1alpha4_NnfPortManagerList(in *NnfPortManagerList, out *v1alpha4.NnfPortManagerList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha4.NnfPortManager)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_NnfPortManagerList_To_v1alpha4_NnfPortManagerList is an autogenerated conversion function. -func Convert_v1alpha1_NnfPortManagerList_To_v1alpha4_NnfPortManagerList(in *NnfPortManagerList, out *v1alpha4.NnfPortManagerList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfPortManagerList_To_v1alpha4_NnfPortManagerList(in, out, s) -} - -func autoConvert_v1alpha4_NnfPortManagerList_To_v1alpha1_NnfPortManagerList(in *v1alpha4.NnfPortManagerList, out *NnfPortManagerList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]NnfPortManager)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha4_NnfPortManagerList_To_v1alpha1_NnfPortManagerList is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfPortManagerList_To_v1alpha1_NnfPortManagerList(in *v1alpha4.NnfPortManagerList, out *NnfPortManagerList, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfPortManagerList_To_v1alpha1_NnfPortManagerList(in, out, s) -} - -func autoConvert_v1alpha1_NnfPortManagerSpec_To_v1alpha4_NnfPortManagerSpec(in *NnfPortManagerSpec, out *v1alpha4.NnfPortManagerSpec, s conversion.Scope) error { - out.SystemConfiguration = in.SystemConfiguration - out.Allocations = *(*[]v1alpha4.NnfPortManagerAllocationSpec)(unsafe.Pointer(&in.Allocations)) - return nil -} - -// Convert_v1alpha1_NnfPortManagerSpec_To_v1alpha4_NnfPortManagerSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfPortManagerSpec_To_v1alpha4_NnfPortManagerSpec(in *NnfPortManagerSpec, out *v1alpha4.NnfPortManagerSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfPortManagerSpec_To_v1alpha4_NnfPortManagerSpec(in, out, s) -} - -func autoConvert_v1alpha4_NnfPortManagerSpec_To_v1alpha1_NnfPortManagerSpec(in *v1alpha4.NnfPortManagerSpec, out *NnfPortManagerSpec, s conversion.Scope) error { - out.SystemConfiguration = in.SystemConfiguration - out.Allocations = *(*[]NnfPortManagerAllocationSpec)(unsafe.Pointer(&in.Allocations)) - return nil -} - -// Convert_v1alpha4_NnfPortManagerSpec_To_v1alpha1_NnfPortManagerSpec is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfPortManagerSpec_To_v1alpha1_NnfPortManagerSpec(in *v1alpha4.NnfPortManagerSpec, out *NnfPortManagerSpec, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfPortManagerSpec_To_v1alpha1_NnfPortManagerSpec(in, out, s) -} - -func autoConvert_v1alpha1_NnfPortManagerStatus_To_v1alpha4_NnfPortManagerStatus(in *NnfPortManagerStatus, out *v1alpha4.NnfPortManagerStatus, s conversion.Scope) error { - out.Allocations = *(*[]v1alpha4.NnfPortManagerAllocationStatus)(unsafe.Pointer(&in.Allocations)) - out.Status = v1alpha4.NnfPortManagerStatusStatus(in.Status) - return nil -} - -// Convert_v1alpha1_NnfPortManagerStatus_To_v1alpha4_NnfPortManagerStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfPortManagerStatus_To_v1alpha4_NnfPortManagerStatus(in *NnfPortManagerStatus, out *v1alpha4.NnfPortManagerStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfPortManagerStatus_To_v1alpha4_NnfPortManagerStatus(in, out, s) -} - -func autoConvert_v1alpha4_NnfPortManagerStatus_To_v1alpha1_NnfPortManagerStatus(in *v1alpha4.NnfPortManagerStatus, out *NnfPortManagerStatus, s conversion.Scope) error { - out.Allocations = *(*[]NnfPortManagerAllocationStatus)(unsafe.Pointer(&in.Allocations)) - out.Status = NnfPortManagerStatusStatus(in.Status) - return nil -} - -// Convert_v1alpha4_NnfPortManagerStatus_To_v1alpha1_NnfPortManagerStatus is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfPortManagerStatus_To_v1alpha1_NnfPortManagerStatus(in *v1alpha4.NnfPortManagerStatus, out *NnfPortManagerStatus, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfPortManagerStatus_To_v1alpha1_NnfPortManagerStatus(in, out, s) -} - -func autoConvert_v1alpha1_NnfResourceStatus_To_v1alpha4_NnfResourceStatus(in *NnfResourceStatus, out *v1alpha4.NnfResourceStatus, s conversion.Scope) error { - out.ID = in.ID - out.Name = in.Name - out.Status = v1alpha4.NnfResourceStatusType(in.Status) - out.Health = v1alpha4.NnfResourceHealthType(in.Health) - return nil -} - -// Convert_v1alpha1_NnfResourceStatus_To_v1alpha4_NnfResourceStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfResourceStatus_To_v1alpha4_NnfResourceStatus(in *NnfResourceStatus, out *v1alpha4.NnfResourceStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfResourceStatus_To_v1alpha4_NnfResourceStatus(in, out, s) -} - -func autoConvert_v1alpha4_NnfResourceStatus_To_v1alpha1_NnfResourceStatus(in *v1alpha4.NnfResourceStatus, out *NnfResourceStatus, s conversion.Scope) error { - out.ID = in.ID - out.Name = in.Name - out.Status = NnfResourceStatusType(in.Status) - out.Health = NnfResourceHealthType(in.Health) - return nil -} - -// Convert_v1alpha4_NnfResourceStatus_To_v1alpha1_NnfResourceStatus is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfResourceStatus_To_v1alpha1_NnfResourceStatus(in *v1alpha4.NnfResourceStatus, out *NnfResourceStatus, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfResourceStatus_To_v1alpha1_NnfResourceStatus(in, out, s) -} - -func autoConvert_v1alpha1_NnfServerStatus_To_v1alpha4_NnfServerStatus(in *NnfServerStatus, out *v1alpha4.NnfServerStatus, s conversion.Scope) error { - out.Hostname = in.Hostname - if err := Convert_v1alpha1_NnfResourceStatus_To_v1alpha4_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_NnfServerStatus_To_v1alpha4_NnfServerStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfServerStatus_To_v1alpha4_NnfServerStatus(in *NnfServerStatus, out *v1alpha4.NnfServerStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfServerStatus_To_v1alpha4_NnfServerStatus(in, out, s) -} - -func autoConvert_v1alpha4_NnfServerStatus_To_v1alpha1_NnfServerStatus(in *v1alpha4.NnfServerStatus, out *NnfServerStatus, s conversion.Scope) error { - out.Hostname = in.Hostname - if err := Convert_v1alpha4_NnfResourceStatus_To_v1alpha1_NnfResourceStatus(&in.NnfResourceStatus, &out.NnfResourceStatus, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha4_NnfServerStatus_To_v1alpha1_NnfServerStatus is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfServerStatus_To_v1alpha1_NnfServerStatus(in *v1alpha4.NnfServerStatus, out *NnfServerStatus, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfServerStatus_To_v1alpha1_NnfServerStatus(in, out, s) -} - -func autoConvert_v1alpha1_NnfStorage_To_v1alpha4_NnfStorage(in *NnfStorage, out *v1alpha4.NnfStorage, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfStorageSpec_To_v1alpha4_NnfStorageSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha1_NnfStorageStatus_To_v1alpha4_NnfStorageStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_NnfStorage_To_v1alpha4_NnfStorage is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorage_To_v1alpha4_NnfStorage(in *NnfStorage, out *v1alpha4.NnfStorage, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorage_To_v1alpha4_NnfStorage(in, out, s) -} - -func autoConvert_v1alpha4_NnfStorage_To_v1alpha1_NnfStorage(in *v1alpha4.NnfStorage, out *NnfStorage, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha4_NnfStorageSpec_To_v1alpha1_NnfStorageSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha4_NnfStorageStatus_To_v1alpha1_NnfStorageStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha4_NnfStorage_To_v1alpha1_NnfStorage is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfStorage_To_v1alpha1_NnfStorage(in *v1alpha4.NnfStorage, out *NnfStorage, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfStorage_To_v1alpha1_NnfStorage(in, out, s) -} - -func autoConvert_v1alpha1_NnfStorageAllocationNodes_To_v1alpha4_NnfStorageAllocationNodes(in *NnfStorageAllocationNodes, out *v1alpha4.NnfStorageAllocationNodes, s conversion.Scope) error { - out.Name = in.Name - out.Count = in.Count - return nil -} - -// Convert_v1alpha1_NnfStorageAllocationNodes_To_v1alpha4_NnfStorageAllocationNodes is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorageAllocationNodes_To_v1alpha4_NnfStorageAllocationNodes(in *NnfStorageAllocationNodes, out *v1alpha4.NnfStorageAllocationNodes, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageAllocationNodes_To_v1alpha4_NnfStorageAllocationNodes(in, out, s) -} - -func autoConvert_v1alpha4_NnfStorageAllocationNodes_To_v1alpha1_NnfStorageAllocationNodes(in *v1alpha4.NnfStorageAllocationNodes, out *NnfStorageAllocationNodes, s conversion.Scope) error { - out.Name = in.Name - out.Count = in.Count - return nil -} - -// Convert_v1alpha4_NnfStorageAllocationNodes_To_v1alpha1_NnfStorageAllocationNodes is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfStorageAllocationNodes_To_v1alpha1_NnfStorageAllocationNodes(in *v1alpha4.NnfStorageAllocationNodes, out *NnfStorageAllocationNodes, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfStorageAllocationNodes_To_v1alpha1_NnfStorageAllocationNodes(in, out, s) -} - -func autoConvert_v1alpha1_NnfStorageAllocationSetSpec_To_v1alpha4_NnfStorageAllocationSetSpec(in *NnfStorageAllocationSetSpec, out *v1alpha4.NnfStorageAllocationSetSpec, s conversion.Scope) error { - out.Name = in.Name - out.Capacity = in.Capacity - if err := Convert_v1alpha1_NnfStorageLustreSpec_To_v1alpha4_NnfStorageLustreSpec(&in.NnfStorageLustreSpec, &out.NnfStorageLustreSpec, s); err != nil { - return err - } - out.SharedAllocation = in.SharedAllocation - out.Nodes = *(*[]v1alpha4.NnfStorageAllocationNodes)(unsafe.Pointer(&in.Nodes)) - return nil -} - -// Convert_v1alpha1_NnfStorageAllocationSetSpec_To_v1alpha4_NnfStorageAllocationSetSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorageAllocationSetSpec_To_v1alpha4_NnfStorageAllocationSetSpec(in *NnfStorageAllocationSetSpec, out *v1alpha4.NnfStorageAllocationSetSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageAllocationSetSpec_To_v1alpha4_NnfStorageAllocationSetSpec(in, out, s) -} - -func autoConvert_v1alpha4_NnfStorageAllocationSetSpec_To_v1alpha1_NnfStorageAllocationSetSpec(in *v1alpha4.NnfStorageAllocationSetSpec, out *NnfStorageAllocationSetSpec, s conversion.Scope) error { - out.Name = in.Name - out.Capacity = in.Capacity - if err := Convert_v1alpha4_NnfStorageLustreSpec_To_v1alpha1_NnfStorageLustreSpec(&in.NnfStorageLustreSpec, &out.NnfStorageLustreSpec, s); err != nil { - return err - } - out.SharedAllocation = in.SharedAllocation - out.Nodes = *(*[]NnfStorageAllocationNodes)(unsafe.Pointer(&in.Nodes)) - return nil -} - -// Convert_v1alpha4_NnfStorageAllocationSetSpec_To_v1alpha1_NnfStorageAllocationSetSpec is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfStorageAllocationSetSpec_To_v1alpha1_NnfStorageAllocationSetSpec(in *v1alpha4.NnfStorageAllocationSetSpec, out *NnfStorageAllocationSetSpec, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfStorageAllocationSetSpec_To_v1alpha1_NnfStorageAllocationSetSpec(in, out, s) -} - -func autoConvert_v1alpha1_NnfStorageAllocationSetStatus_To_v1alpha4_NnfStorageAllocationSetStatus(in *NnfStorageAllocationSetStatus, out *v1alpha4.NnfStorageAllocationSetStatus, s conversion.Scope) error { - out.Ready = in.Ready - out.AllocationCount = in.AllocationCount - return nil -} - -// Convert_v1alpha1_NnfStorageAllocationSetStatus_To_v1alpha4_NnfStorageAllocationSetStatus is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorageAllocationSetStatus_To_v1alpha4_NnfStorageAllocationSetStatus(in *NnfStorageAllocationSetStatus, out *v1alpha4.NnfStorageAllocationSetStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageAllocationSetStatus_To_v1alpha4_NnfStorageAllocationSetStatus(in, out, s) -} - -func autoConvert_v1alpha4_NnfStorageAllocationSetStatus_To_v1alpha1_NnfStorageAllocationSetStatus(in *v1alpha4.NnfStorageAllocationSetStatus, out *NnfStorageAllocationSetStatus, s conversion.Scope) error { - out.Ready = in.Ready - out.AllocationCount = in.AllocationCount - return nil -} - -// Convert_v1alpha4_NnfStorageAllocationSetStatus_To_v1alpha1_NnfStorageAllocationSetStatus is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfStorageAllocationSetStatus_To_v1alpha1_NnfStorageAllocationSetStatus(in *v1alpha4.NnfStorageAllocationSetStatus, out *NnfStorageAllocationSetStatus, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfStorageAllocationSetStatus_To_v1alpha1_NnfStorageAllocationSetStatus(in, out, s) -} - -func autoConvert_v1alpha1_NnfStorageList_To_v1alpha4_NnfStorageList(in *NnfStorageList, out *v1alpha4.NnfStorageList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha4.NnfStorage)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_NnfStorageList_To_v1alpha4_NnfStorageList is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorageList_To_v1alpha4_NnfStorageList(in *NnfStorageList, out *v1alpha4.NnfStorageList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageList_To_v1alpha4_NnfStorageList(in, out, s) -} - -func autoConvert_v1alpha4_NnfStorageList_To_v1alpha1_NnfStorageList(in *v1alpha4.NnfStorageList, out *NnfStorageList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]NnfStorage)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha4_NnfStorageList_To_v1alpha1_NnfStorageList is an autogenerated conversion function. -func Convert_v1alpha4_NnfStorageList_To_v1alpha1_NnfStorageList(in *v1alpha4.NnfStorageList, out *NnfStorageList, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfStorageList_To_v1alpha1_NnfStorageList(in, out, s) -} - -func autoConvert_v1alpha1_NnfStorageLustreSpec_To_v1alpha4_NnfStorageLustreSpec(in *NnfStorageLustreSpec, out *v1alpha4.NnfStorageLustreSpec, s conversion.Scope) error { - out.TargetType = in.TargetType - out.BackFs = in.BackFs - out.MgsAddress = in.MgsAddress - out.PersistentMgsReference = in.PersistentMgsReference - return nil -} - -// Convert_v1alpha1_NnfStorageLustreSpec_To_v1alpha4_NnfStorageLustreSpec is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfStorageLustreSpec_To_v1alpha4_NnfStorageLustreSpec(in *NnfStorageLustreSpec, out *v1alpha4.NnfStorageLustreSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageLustreSpec_To_v1alpha4_NnfStorageLustreSpec(in, out, s) -} - -func autoConvert_v1alpha4_NnfStorageLustreSpec_To_v1alpha1_NnfStorageLustreSpec(in *v1alpha4.NnfStorageLustreSpec, out *NnfStorageLustreSpec, s conversion.Scope) error { - out.TargetType = in.TargetType - out.BackFs = in.BackFs - out.MgsAddress = in.MgsAddress - out.PersistentMgsReference = in.PersistentMgsReference - return nil -} - -// Convert_v1alpha4_NnfStorageLustreSpec_To_v1alpha1_NnfStorageLustreSpec is an autogenerated conversion function. -func Convert_v1alpha4_NnfStorageLustreSpec_To_v1alpha1_NnfStorageLustreSpec(in *v1alpha4.NnfStorageLustreSpec, out *NnfStorageLustreSpec, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfStorageLustreSpec_To_v1alpha1_NnfStorageLustreSpec(in, out, s) -} - -func autoConvert_v1alpha1_NnfStorageLustreStatus_To_v1alpha4_NnfStorageLustreStatus(in *NnfStorageLustreStatus, out *v1alpha4.NnfStorageLustreStatus, s conversion.Scope) error { - out.MgsAddress = in.MgsAddress - out.FileSystemName = in.FileSystemName - out.LustreMgtReference = in.LustreMgtReference - return nil -} - -// Convert_v1alpha1_NnfStorageLustreStatus_To_v1alpha4_NnfStorageLustreStatus is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfStorageLustreStatus_To_v1alpha4_NnfStorageLustreStatus(in *NnfStorageLustreStatus, out *v1alpha4.NnfStorageLustreStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageLustreStatus_To_v1alpha4_NnfStorageLustreStatus(in, out, s) -} - -func autoConvert_v1alpha4_NnfStorageLustreStatus_To_v1alpha1_NnfStorageLustreStatus(in *v1alpha4.NnfStorageLustreStatus, out *NnfStorageLustreStatus, s conversion.Scope) error { - out.MgsAddress = in.MgsAddress - out.FileSystemName = in.FileSystemName - out.LustreMgtReference = in.LustreMgtReference - return nil -} - -// Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha1_NnfStorageLustreStatus is an autogenerated conversion function. -func Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha1_NnfStorageLustreStatus(in *v1alpha4.NnfStorageLustreStatus, out *NnfStorageLustreStatus, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfStorageLustreStatus_To_v1alpha1_NnfStorageLustreStatus(in, out, s) -} - -func autoConvert_v1alpha1_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(in *NnfStorageProfile, out *v1alpha4.NnfStorageProfile, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData(&in.Data, &out.Data, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_NnfStorageProfile_To_v1alpha4_NnfStorageProfile is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(in *NnfStorageProfile, out *v1alpha4.NnfStorageProfile, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(in, out, s) -} - -func autoConvert_v1alpha4_NnfStorageProfile_To_v1alpha1_NnfStorageProfile(in *v1alpha4.NnfStorageProfile, out *NnfStorageProfile, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha4_NnfStorageProfileData_To_v1alpha1_NnfStorageProfileData(&in.Data, &out.Data, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha4_NnfStorageProfile_To_v1alpha1_NnfStorageProfile is an autogenerated conversion function. -func Convert_v1alpha4_NnfStorageProfile_To_v1alpha1_NnfStorageProfile(in *v1alpha4.NnfStorageProfile, out *NnfStorageProfile, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfStorageProfile_To_v1alpha1_NnfStorageProfile(in, out, s) -} - -func autoConvert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(in *NnfStorageProfileCmdLines, out *v1alpha4.NnfStorageProfileCmdLines, s conversion.Scope) error { - out.Mkfs = in.Mkfs - out.SharedVg = in.SharedVg - out.PvCreate = in.PvCreate - out.PvRemove = in.PvRemove - out.VgCreate = in.VgCreate - if err := Convert_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines(&in.VgChange, &out.VgChange, s); err != nil { - return err - } - out.VgRemove = in.VgRemove - out.LvCreate = in.LvCreate - if err := Convert_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines(&in.LvChange, &out.LvChange, s); err != nil { - return err - } - out.LvRemove = in.LvRemove - out.MountRabbit = in.MountRabbit - out.MountCompute = in.MountCompute - return nil -} - -// Convert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(in *NnfStorageProfileCmdLines, out *v1alpha4.NnfStorageProfileCmdLines, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(in, out, s) -} - -func autoConvert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(in *v1alpha4.NnfStorageProfileCmdLines, out *NnfStorageProfileCmdLines, s conversion.Scope) error { - out.Mkfs = in.Mkfs - out.SharedVg = in.SharedVg - out.PvCreate = in.PvCreate - out.PvRemove = in.PvRemove - out.VgCreate = in.VgCreate - if err := Convert_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines(&in.VgChange, &out.VgChange, s); err != nil { - return err - } - out.VgRemove = in.VgRemove - out.LvCreate = in.LvCreate - if err := Convert_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines(&in.LvChange, &out.LvChange, s); err != nil { - return err - } - out.LvRemove = in.LvRemove - out.MountRabbit = in.MountRabbit - // WARNING: in.PostMount requires manual conversion: does not exist in peer-type - out.MountCompute = in.MountCompute - // WARNING: in.PreUnmount requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_v1alpha1_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData(in *NnfStorageProfileData, out *v1alpha4.NnfStorageProfileData, s conversion.Scope) error { - out.Default = in.Default - out.Pinned = in.Pinned - if err := Convert_v1alpha1_NnfStorageProfileLustreData_To_v1alpha4_NnfStorageProfileLustreData(&in.LustreStorage, &out.LustreStorage, s); err != nil { - return err - } - if err := Convert_v1alpha1_NnfStorageProfileGFS2Data_To_v1alpha4_NnfStorageProfileGFS2Data(&in.GFS2Storage, &out.GFS2Storage, s); err != nil { - return err - } - if err := Convert_v1alpha1_NnfStorageProfileXFSData_To_v1alpha4_NnfStorageProfileXFSData(&in.XFSStorage, 
&out.XFSStorage, s); err != nil { - return err - } - if err := Convert_v1alpha1_NnfStorageProfileRawData_To_v1alpha4_NnfStorageProfileRawData(&in.RawStorage, &out.RawStorage, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData(in *NnfStorageProfileData, out *v1alpha4.NnfStorageProfileData, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData(in, out, s) -} - -func autoConvert_v1alpha4_NnfStorageProfileData_To_v1alpha1_NnfStorageProfileData(in *v1alpha4.NnfStorageProfileData, out *NnfStorageProfileData, s conversion.Scope) error { - out.Default = in.Default - out.Pinned = in.Pinned - if err := Convert_v1alpha4_NnfStorageProfileLustreData_To_v1alpha1_NnfStorageProfileLustreData(&in.LustreStorage, &out.LustreStorage, s); err != nil { - return err - } - if err := Convert_v1alpha4_NnfStorageProfileGFS2Data_To_v1alpha1_NnfStorageProfileGFS2Data(&in.GFS2Storage, &out.GFS2Storage, s); err != nil { - return err - } - if err := Convert_v1alpha4_NnfStorageProfileXFSData_To_v1alpha1_NnfStorageProfileXFSData(&in.XFSStorage, &out.XFSStorage, s); err != nil { - return err - } - if err := Convert_v1alpha4_NnfStorageProfileRawData_To_v1alpha1_NnfStorageProfileRawData(&in.RawStorage, &out.RawStorage, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha4_NnfStorageProfileData_To_v1alpha1_NnfStorageProfileData is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfStorageProfileData_To_v1alpha1_NnfStorageProfileData(in *v1alpha4.NnfStorageProfileData, out *NnfStorageProfileData, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfStorageProfileData_To_v1alpha1_NnfStorageProfileData(in, out, s) -} - -func autoConvert_v1alpha1_NnfStorageProfileGFS2Data_To_v1alpha4_NnfStorageProfileGFS2Data(in *NnfStorageProfileGFS2Data, out *v1alpha4.NnfStorageProfileGFS2Data, s conversion.Scope) error { - if err := Convert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { - return err - } - out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) - out.CapacityScalingFactor = in.CapacityScalingFactor - return nil -} - -// Convert_v1alpha1_NnfStorageProfileGFS2Data_To_v1alpha4_NnfStorageProfileGFS2Data is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorageProfileGFS2Data_To_v1alpha4_NnfStorageProfileGFS2Data(in *NnfStorageProfileGFS2Data, out *v1alpha4.NnfStorageProfileGFS2Data, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageProfileGFS2Data_To_v1alpha4_NnfStorageProfileGFS2Data(in, out, s) -} - -func autoConvert_v1alpha4_NnfStorageProfileGFS2Data_To_v1alpha1_NnfStorageProfileGFS2Data(in *v1alpha4.NnfStorageProfileGFS2Data, out *NnfStorageProfileGFS2Data, s conversion.Scope) error { - if err := Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { - return err - } - out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) - out.CapacityScalingFactor = in.CapacityScalingFactor - return nil -} - -// Convert_v1alpha4_NnfStorageProfileGFS2Data_To_v1alpha1_NnfStorageProfileGFS2Data is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfStorageProfileGFS2Data_To_v1alpha1_NnfStorageProfileGFS2Data(in *v1alpha4.NnfStorageProfileGFS2Data, out *NnfStorageProfileGFS2Data, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfStorageProfileGFS2Data_To_v1alpha1_NnfStorageProfileGFS2Data(in, out, s) -} - -func autoConvert_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines(in *NnfStorageProfileLVMLvChangeCmdLines, out *v1alpha4.NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { - out.Activate = in.Activate - out.Deactivate = in.Deactivate - return nil -} - -// Convert_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines(in *NnfStorageProfileLVMLvChangeCmdLines, out *v1alpha4.NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines(in, out, s) -} - -func autoConvert_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines(in *v1alpha4.NnfStorageProfileLVMLvChangeCmdLines, out *NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { - out.Activate = in.Activate - out.Deactivate = in.Deactivate - return nil -} - -// Convert_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines(in *v1alpha4.NnfStorageProfileLVMLvChangeCmdLines, out *NnfStorageProfileLVMLvChangeCmdLines, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfStorageProfileLVMLvChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMLvChangeCmdLines(in, out, s) -} - -func autoConvert_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines(in *NnfStorageProfileLVMVgChangeCmdLines, out *v1alpha4.NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { - out.LockStart = in.LockStart - out.LockStop = in.LockStop - return nil -} - -// Convert_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines(in *NnfStorageProfileLVMVgChangeCmdLines, out *v1alpha4.NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines(in, out, s) -} - -func autoConvert_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines(in *v1alpha4.NnfStorageProfileLVMVgChangeCmdLines, out *NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { - out.LockStart = in.LockStart - out.LockStop = in.LockStop - return nil -} - -// Convert_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines(in *v1alpha4.NnfStorageProfileLVMVgChangeCmdLines, out *NnfStorageProfileLVMVgChangeCmdLines, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfStorageProfileLVMVgChangeCmdLines_To_v1alpha1_NnfStorageProfileLVMVgChangeCmdLines(in, out, s) -} - -func autoConvert_v1alpha1_NnfStorageProfileList_To_v1alpha4_NnfStorageProfileList(in *NnfStorageProfileList, out *v1alpha4.NnfStorageProfileList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]v1alpha4.NnfStorageProfile, len(*in)) - for i := range *in { - if err := Convert_v1alpha1_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1alpha1_NnfStorageProfileList_To_v1alpha4_NnfStorageProfileList is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorageProfileList_To_v1alpha4_NnfStorageProfileList(in *NnfStorageProfileList, out *v1alpha4.NnfStorageProfileList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageProfileList_To_v1alpha4_NnfStorageProfileList(in, out, s) -} - -func autoConvert_v1alpha4_NnfStorageProfileList_To_v1alpha1_NnfStorageProfileList(in *v1alpha4.NnfStorageProfileList, out *NnfStorageProfileList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NnfStorageProfile, len(*in)) - for i := range *in { - if err := Convert_v1alpha4_NnfStorageProfile_To_v1alpha1_NnfStorageProfile(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1alpha4_NnfStorageProfileList_To_v1alpha1_NnfStorageProfileList is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfStorageProfileList_To_v1alpha1_NnfStorageProfileList(in *v1alpha4.NnfStorageProfileList, out *NnfStorageProfileList, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfStorageProfileList_To_v1alpha1_NnfStorageProfileList(in, out, s) -} - -func autoConvert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(in *NnfStorageProfileLustreCmdLines, out *v1alpha4.NnfStorageProfileLustreCmdLines, s conversion.Scope) error { - out.ZpoolCreate = in.ZpoolCreate - out.Mkfs = in.Mkfs - out.MountTarget = in.MountTarget - return nil -} - -// Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines is an autogenerated conversion function. -func Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(in *NnfStorageProfileLustreCmdLines, out *v1alpha4.NnfStorageProfileLustreCmdLines, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(in, out, s) -} - -func autoConvert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(in *v1alpha4.NnfStorageProfileLustreCmdLines, out *NnfStorageProfileLustreCmdLines, s conversion.Scope) error { - out.ZpoolCreate = in.ZpoolCreate - out.Mkfs = in.Mkfs - out.MountTarget = in.MountTarget - // WARNING: in.PostActivate requires manual conversion: does not exist in peer-type - // WARNING: in.PostMount requires manual conversion: does not exist in peer-type - // WARNING: in.PreUnmount requires manual conversion: does not exist in peer-type - // WARNING: in.PreDeactivate requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_v1alpha1_NnfStorageProfileLustreData_To_v1alpha4_NnfStorageProfileLustreData(in *NnfStorageProfileLustreData, out *v1alpha4.NnfStorageProfileLustreData, s conversion.Scope) error { - out.CombinedMGTMDT = in.CombinedMGTMDT - out.ExternalMGS = 
in.ExternalMGS - out.CapacityMGT = in.CapacityMGT - out.CapacityMDT = in.CapacityMDT - out.ExclusiveMDT = in.ExclusiveMDT - out.CapacityScalingFactor = in.CapacityScalingFactor - out.StandaloneMGTPoolName = in.StandaloneMGTPoolName - if err := Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(&in.MgtCmdLines, &out.MgtCmdLines, s); err != nil { - return err - } - if err := Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(&in.MdtCmdLines, &out.MdtCmdLines, s); err != nil { - return err - } - if err := Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(&in.MgtMdtCmdLines, &out.MgtMdtCmdLines, s); err != nil { - return err - } - if err := Convert_v1alpha1_NnfStorageProfileLustreCmdLines_To_v1alpha4_NnfStorageProfileLustreCmdLines(&in.OstCmdLines, &out.OstCmdLines, s); err != nil { - return err - } - if err := Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(&in.MgtOptions, &out.MgtOptions, s); err != nil { - return err - } - if err := Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(&in.MdtOptions, &out.MdtOptions, s); err != nil { - return err - } - if err := Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(&in.MgtMdtOptions, &out.MgtMdtOptions, s); err != nil { - return err - } - if err := Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(&in.OstOptions, &out.OstOptions, s); err != nil { - return err - } - out.MountRabbit = in.MountRabbit - out.MountCompute = in.MountCompute - return nil -} - -// Convert_v1alpha1_NnfStorageProfileLustreData_To_v1alpha4_NnfStorageProfileLustreData is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfStorageProfileLustreData_To_v1alpha4_NnfStorageProfileLustreData(in *NnfStorageProfileLustreData, out *v1alpha4.NnfStorageProfileLustreData, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageProfileLustreData_To_v1alpha4_NnfStorageProfileLustreData(in, out, s) -} - -func autoConvert_v1alpha4_NnfStorageProfileLustreData_To_v1alpha1_NnfStorageProfileLustreData(in *v1alpha4.NnfStorageProfileLustreData, out *NnfStorageProfileLustreData, s conversion.Scope) error { - out.CombinedMGTMDT = in.CombinedMGTMDT - out.ExternalMGS = in.ExternalMGS - out.CapacityMGT = in.CapacityMGT - out.CapacityMDT = in.CapacityMDT - out.ExclusiveMDT = in.ExclusiveMDT - out.CapacityScalingFactor = in.CapacityScalingFactor - out.StandaloneMGTPoolName = in.StandaloneMGTPoolName - if err := Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(&in.MgtCmdLines, &out.MgtCmdLines, s); err != nil { - return err - } - if err := Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(&in.MdtCmdLines, &out.MdtCmdLines, s); err != nil { - return err - } - if err := Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(&in.MgtMdtCmdLines, &out.MgtMdtCmdLines, s); err != nil { - return err - } - if err := Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha1_NnfStorageProfileLustreCmdLines(&in.OstCmdLines, &out.OstCmdLines, s); err != nil { - return err - } - if err := Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(&in.MgtOptions, &out.MgtOptions, s); err != nil { - return err - } - if err := Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(&in.MdtOptions, &out.MdtOptions, s); err != nil { - return err - } - if err := Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(&in.MgtMdtOptions, 
&out.MgtMdtOptions, s); err != nil { - return err - } - if err := Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(&in.OstOptions, &out.OstOptions, s); err != nil { - return err - } - out.MountRabbit = in.MountRabbit - out.MountCompute = in.MountCompute - return nil -} - -// Convert_v1alpha4_NnfStorageProfileLustreData_To_v1alpha1_NnfStorageProfileLustreData is an autogenerated conversion function. -func Convert_v1alpha4_NnfStorageProfileLustreData_To_v1alpha1_NnfStorageProfileLustreData(in *v1alpha4.NnfStorageProfileLustreData, out *NnfStorageProfileLustreData, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfStorageProfileLustreData_To_v1alpha1_NnfStorageProfileLustreData(in, out, s) -} - -func autoConvert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(in *NnfStorageProfileLustreMiscOptions, out *v1alpha4.NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { - out.ColocateComputes = in.ColocateComputes - out.Count = in.Count - out.Scale = in.Scale - out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) - return nil -} - -// Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(in *NnfStorageProfileLustreMiscOptions, out *v1alpha4.NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageProfileLustreMiscOptions_To_v1alpha4_NnfStorageProfileLustreMiscOptions(in, out, s) -} - -func autoConvert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(in *v1alpha4.NnfStorageProfileLustreMiscOptions, out *NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { - out.ColocateComputes = in.ColocateComputes - out.Count = in.Count - out.Scale = in.Scale - out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) - return nil -} - -// Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions is an autogenerated conversion function. -func Convert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(in *v1alpha4.NnfStorageProfileLustreMiscOptions, out *NnfStorageProfileLustreMiscOptions, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfStorageProfileLustreMiscOptions_To_v1alpha1_NnfStorageProfileLustreMiscOptions(in, out, s) -} - -func autoConvert_v1alpha1_NnfStorageProfileRawData_To_v1alpha4_NnfStorageProfileRawData(in *NnfStorageProfileRawData, out *v1alpha4.NnfStorageProfileRawData, s conversion.Scope) error { - if err := Convert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { - return err - } - out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) - out.CapacityScalingFactor = in.CapacityScalingFactor - return nil -} - -// Convert_v1alpha1_NnfStorageProfileRawData_To_v1alpha4_NnfStorageProfileRawData is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfStorageProfileRawData_To_v1alpha4_NnfStorageProfileRawData(in *NnfStorageProfileRawData, out *v1alpha4.NnfStorageProfileRawData, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageProfileRawData_To_v1alpha4_NnfStorageProfileRawData(in, out, s) -} - -func autoConvert_v1alpha4_NnfStorageProfileRawData_To_v1alpha1_NnfStorageProfileRawData(in *v1alpha4.NnfStorageProfileRawData, out *NnfStorageProfileRawData, s conversion.Scope) error { - if err := Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { - return err - } - out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) - out.CapacityScalingFactor = in.CapacityScalingFactor - return nil -} - -// Convert_v1alpha4_NnfStorageProfileRawData_To_v1alpha1_NnfStorageProfileRawData is an autogenerated conversion function. -func Convert_v1alpha4_NnfStorageProfileRawData_To_v1alpha1_NnfStorageProfileRawData(in *v1alpha4.NnfStorageProfileRawData, out *NnfStorageProfileRawData, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfStorageProfileRawData_To_v1alpha1_NnfStorageProfileRawData(in, out, s) -} - -func autoConvert_v1alpha1_NnfStorageProfileXFSData_To_v1alpha4_NnfStorageProfileXFSData(in *NnfStorageProfileXFSData, out *v1alpha4.NnfStorageProfileXFSData, s conversion.Scope) error { - if err := Convert_v1alpha1_NnfStorageProfileCmdLines_To_v1alpha4_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { - return err - } - out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) - out.CapacityScalingFactor = in.CapacityScalingFactor - return nil -} - -// Convert_v1alpha1_NnfStorageProfileXFSData_To_v1alpha4_NnfStorageProfileXFSData is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfStorageProfileXFSData_To_v1alpha4_NnfStorageProfileXFSData(in *NnfStorageProfileXFSData, out *v1alpha4.NnfStorageProfileXFSData, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageProfileXFSData_To_v1alpha4_NnfStorageProfileXFSData(in, out, s) -} - -func autoConvert_v1alpha4_NnfStorageProfileXFSData_To_v1alpha1_NnfStorageProfileXFSData(in *v1alpha4.NnfStorageProfileXFSData, out *NnfStorageProfileXFSData, s conversion.Scope) error { - if err := Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha1_NnfStorageProfileCmdLines(&in.CmdLines, &out.CmdLines, s); err != nil { - return err - } - out.StorageLabels = *(*[]string)(unsafe.Pointer(&in.StorageLabels)) - out.CapacityScalingFactor = in.CapacityScalingFactor - return nil -} - -// Convert_v1alpha4_NnfStorageProfileXFSData_To_v1alpha1_NnfStorageProfileXFSData is an autogenerated conversion function. -func Convert_v1alpha4_NnfStorageProfileXFSData_To_v1alpha1_NnfStorageProfileXFSData(in *v1alpha4.NnfStorageProfileXFSData, out *NnfStorageProfileXFSData, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfStorageProfileXFSData_To_v1alpha1_NnfStorageProfileXFSData(in, out, s) -} - -func autoConvert_v1alpha1_NnfStorageSpec_To_v1alpha4_NnfStorageSpec(in *NnfStorageSpec, out *v1alpha4.NnfStorageSpec, s conversion.Scope) error { - out.FileSystemType = in.FileSystemType - out.UserID = in.UserID - out.GroupID = in.GroupID - out.AllocationSets = *(*[]v1alpha4.NnfStorageAllocationSetSpec)(unsafe.Pointer(&in.AllocationSets)) - return nil -} - -// Convert_v1alpha1_NnfStorageSpec_To_v1alpha4_NnfStorageSpec is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfStorageSpec_To_v1alpha4_NnfStorageSpec(in *NnfStorageSpec, out *v1alpha4.NnfStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageSpec_To_v1alpha4_NnfStorageSpec(in, out, s) -} - -func autoConvert_v1alpha4_NnfStorageSpec_To_v1alpha1_NnfStorageSpec(in *v1alpha4.NnfStorageSpec, out *NnfStorageSpec, s conversion.Scope) error { - out.FileSystemType = in.FileSystemType - out.UserID = in.UserID - out.GroupID = in.GroupID - out.AllocationSets = *(*[]NnfStorageAllocationSetSpec)(unsafe.Pointer(&in.AllocationSets)) - return nil -} - -// Convert_v1alpha4_NnfStorageSpec_To_v1alpha1_NnfStorageSpec is an autogenerated conversion function. -func Convert_v1alpha4_NnfStorageSpec_To_v1alpha1_NnfStorageSpec(in *v1alpha4.NnfStorageSpec, out *NnfStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfStorageSpec_To_v1alpha1_NnfStorageSpec(in, out, s) -} - -func autoConvert_v1alpha1_NnfStorageStatus_To_v1alpha4_NnfStorageStatus(in *NnfStorageStatus, out *v1alpha4.NnfStorageStatus, s conversion.Scope) error { - if err := Convert_v1alpha1_NnfStorageLustreStatus_To_v1alpha4_NnfStorageLustreStatus(&in.NnfStorageLustreStatus, &out.NnfStorageLustreStatus, s); err != nil { - return err - } - out.AllocationSets = *(*[]v1alpha4.NnfStorageAllocationSetStatus)(unsafe.Pointer(&in.AllocationSets)) - out.ResourceError = in.ResourceError - out.Ready = in.Ready - return nil -} - -// Convert_v1alpha1_NnfStorageStatus_To_v1alpha4_NnfStorageStatus is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfStorageStatus_To_v1alpha4_NnfStorageStatus(in *NnfStorageStatus, out *v1alpha4.NnfStorageStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfStorageStatus_To_v1alpha4_NnfStorageStatus(in, out, s) -} - -func autoConvert_v1alpha4_NnfStorageStatus_To_v1alpha1_NnfStorageStatus(in *v1alpha4.NnfStorageStatus, out *NnfStorageStatus, s conversion.Scope) error { - if err := Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha1_NnfStorageLustreStatus(&in.NnfStorageLustreStatus, &out.NnfStorageLustreStatus, s); err != nil { - return err - } - out.AllocationSets = *(*[]NnfStorageAllocationSetStatus)(unsafe.Pointer(&in.AllocationSets)) - out.ResourceError = in.ResourceError - out.Ready = in.Ready - return nil -} - -// Convert_v1alpha4_NnfStorageStatus_To_v1alpha1_NnfStorageStatus is an autogenerated conversion function. -func Convert_v1alpha4_NnfStorageStatus_To_v1alpha1_NnfStorageStatus(in *v1alpha4.NnfStorageStatus, out *NnfStorageStatus, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfStorageStatus_To_v1alpha1_NnfStorageStatus(in, out, s) -} - -func autoConvert_v1alpha1_NnfSystemStorage_To_v1alpha4_NnfSystemStorage(in *NnfSystemStorage, out *v1alpha4.NnfSystemStorage, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_NnfSystemStorageSpec_To_v1alpha4_NnfSystemStorageSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha1_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_NnfSystemStorage_To_v1alpha4_NnfSystemStorage is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfSystemStorage_To_v1alpha4_NnfSystemStorage(in *NnfSystemStorage, out *v1alpha4.NnfSystemStorage, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfSystemStorage_To_v1alpha4_NnfSystemStorage(in, out, s) -} - -func autoConvert_v1alpha4_NnfSystemStorage_To_v1alpha1_NnfSystemStorage(in *v1alpha4.NnfSystemStorage, out *NnfSystemStorage, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha1_NnfSystemStorageSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha4_NnfSystemStorageStatus_To_v1alpha1_NnfSystemStorageStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha4_NnfSystemStorage_To_v1alpha1_NnfSystemStorage is an autogenerated conversion function. -func Convert_v1alpha4_NnfSystemStorage_To_v1alpha1_NnfSystemStorage(in *v1alpha4.NnfSystemStorage, out *NnfSystemStorage, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfSystemStorage_To_v1alpha1_NnfSystemStorage(in, out, s) -} - -func autoConvert_v1alpha1_NnfSystemStorageList_To_v1alpha4_NnfSystemStorageList(in *NnfSystemStorageList, out *v1alpha4.NnfSystemStorageList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]v1alpha4.NnfSystemStorage, len(*in)) - for i := range *in { - if err := Convert_v1alpha1_NnfSystemStorage_To_v1alpha4_NnfSystemStorage(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1alpha1_NnfSystemStorageList_To_v1alpha4_NnfSystemStorageList is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfSystemStorageList_To_v1alpha4_NnfSystemStorageList(in *NnfSystemStorageList, out *v1alpha4.NnfSystemStorageList, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfSystemStorageList_To_v1alpha4_NnfSystemStorageList(in, out, s) -} - -func autoConvert_v1alpha4_NnfSystemStorageList_To_v1alpha1_NnfSystemStorageList(in *v1alpha4.NnfSystemStorageList, out *NnfSystemStorageList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NnfSystemStorage, len(*in)) - for i := range *in { - if err := Convert_v1alpha4_NnfSystemStorage_To_v1alpha1_NnfSystemStorage(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1alpha4_NnfSystemStorageList_To_v1alpha1_NnfSystemStorageList is an autogenerated conversion function. -func Convert_v1alpha4_NnfSystemStorageList_To_v1alpha1_NnfSystemStorageList(in *v1alpha4.NnfSystemStorageList, out *NnfSystemStorageList, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfSystemStorageList_To_v1alpha1_NnfSystemStorageList(in, out, s) -} - -func autoConvert_v1alpha1_NnfSystemStorageSpec_To_v1alpha4_NnfSystemStorageSpec(in *NnfSystemStorageSpec, out *v1alpha4.NnfSystemStorageSpec, s conversion.Scope) error { - out.SystemConfiguration = in.SystemConfiguration - out.ExcludeRabbits = *(*[]string)(unsafe.Pointer(&in.ExcludeRabbits)) - out.IncludeRabbits = *(*[]string)(unsafe.Pointer(&in.IncludeRabbits)) - out.ExcludeComputes = *(*[]string)(unsafe.Pointer(&in.ExcludeComputes)) - out.IncludeComputes = *(*[]string)(unsafe.Pointer(&in.IncludeComputes)) - out.ComputesTarget = v1alpha4.NnfSystemStorageComputesTarget(in.ComputesTarget) - out.ComputesPattern = *(*[]int)(unsafe.Pointer(&in.ComputesPattern)) - out.Capacity = in.Capacity - out.Type = in.Type - out.StorageProfile = in.StorageProfile - out.MakeClientMounts = in.MakeClientMounts - out.ClientMountPath = 
in.ClientMountPath - return nil -} - -// Convert_v1alpha1_NnfSystemStorageSpec_To_v1alpha4_NnfSystemStorageSpec is an autogenerated conversion function. -func Convert_v1alpha1_NnfSystemStorageSpec_To_v1alpha4_NnfSystemStorageSpec(in *NnfSystemStorageSpec, out *v1alpha4.NnfSystemStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfSystemStorageSpec_To_v1alpha4_NnfSystemStorageSpec(in, out, s) -} - -func autoConvert_v1alpha4_NnfSystemStorageSpec_To_v1alpha1_NnfSystemStorageSpec(in *v1alpha4.NnfSystemStorageSpec, out *NnfSystemStorageSpec, s conversion.Scope) error { - out.SystemConfiguration = in.SystemConfiguration - out.ExcludeRabbits = *(*[]string)(unsafe.Pointer(&in.ExcludeRabbits)) - out.IncludeRabbits = *(*[]string)(unsafe.Pointer(&in.IncludeRabbits)) - // WARNING: in.ExcludeDisabledRabbits requires manual conversion: does not exist in peer-type - out.ExcludeComputes = *(*[]string)(unsafe.Pointer(&in.ExcludeComputes)) - out.IncludeComputes = *(*[]string)(unsafe.Pointer(&in.IncludeComputes)) - out.ComputesTarget = NnfSystemStorageComputesTarget(in.ComputesTarget) - out.ComputesPattern = *(*[]int)(unsafe.Pointer(&in.ComputesPattern)) - out.Capacity = in.Capacity - out.Type = in.Type - // WARNING: in.Shared requires manual conversion: does not exist in peer-type - out.StorageProfile = in.StorageProfile - out.MakeClientMounts = in.MakeClientMounts - out.ClientMountPath = in.ClientMountPath - return nil -} - -func autoConvert_v1alpha1_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus(in *NnfSystemStorageStatus, out *v1alpha4.NnfSystemStorageStatus, s conversion.Scope) error { - out.Ready = in.Ready - out.ResourceError = in.ResourceError - return nil -} - -// Convert_v1alpha1_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus is an autogenerated conversion function. 
-func Convert_v1alpha1_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus(in *NnfSystemStorageStatus, out *v1alpha4.NnfSystemStorageStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_NnfSystemStorageStatus_To_v1alpha4_NnfSystemStorageStatus(in, out, s) -} - -func autoConvert_v1alpha4_NnfSystemStorageStatus_To_v1alpha1_NnfSystemStorageStatus(in *v1alpha4.NnfSystemStorageStatus, out *NnfSystemStorageStatus, s conversion.Scope) error { - out.Ready = in.Ready - out.ResourceError = in.ResourceError - return nil -} - -// Convert_v1alpha4_NnfSystemStorageStatus_To_v1alpha1_NnfSystemStorageStatus is an autogenerated conversion function. -func Convert_v1alpha4_NnfSystemStorageStatus_To_v1alpha1_NnfSystemStorageStatus(in *v1alpha4.NnfSystemStorageStatus, out *NnfSystemStorageStatus, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfSystemStorageStatus_To_v1alpha1_NnfSystemStorageStatus(in, out, s) -} diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index de055172..00000000 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,2022 +0,0 @@ -//go:build !ignore_autogenerated - -/* - * Copyright 2024 Hewlett Packard Enterprise Development LP - * Other additional copyright holders may be indicated within. - * - * The entirety of this work is licensed under the Apache License, - * Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. - * - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Code generated by controller-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1" - "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LustreStorageSpec) DeepCopyInto(out *LustreStorageSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LustreStorageSpec. -func (in *LustreStorageSpec) DeepCopy() *LustreStorageSpec { - if in == nil { - return nil - } - out := new(LustreStorageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfAccess) DeepCopyInto(out *NnfAccess) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfAccess. -func (in *NnfAccess) DeepCopy() *NnfAccess { - if in == nil { - return nil - } - out := new(NnfAccess) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NnfAccess) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfAccessList) DeepCopyInto(out *NnfAccessList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NnfAccess, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfAccessList. 
-func (in *NnfAccessList) DeepCopy() *NnfAccessList { - if in == nil { - return nil - } - out := new(NnfAccessList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NnfAccessList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfAccessSpec) DeepCopyInto(out *NnfAccessSpec) { - *out = *in - out.ClientReference = in.ClientReference - out.StorageReference = in.StorageReference -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfAccessSpec. -func (in *NnfAccessSpec) DeepCopy() *NnfAccessSpec { - if in == nil { - return nil - } - out := new(NnfAccessSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfAccessStatus) DeepCopyInto(out *NnfAccessStatus) { - *out = *in - in.ResourceError.DeepCopyInto(&out.ResourceError) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfAccessStatus. -func (in *NnfAccessStatus) DeepCopy() *NnfAccessStatus { - if in == nil { - return nil - } - out := new(NnfAccessStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfContainerProfile) DeepCopyInto(out *NnfContainerProfile) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Data.DeepCopyInto(&out.Data) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfContainerProfile. 
-func (in *NnfContainerProfile) DeepCopy() *NnfContainerProfile { - if in == nil { - return nil - } - out := new(NnfContainerProfile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NnfContainerProfile) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfContainerProfileData) DeepCopyInto(out *NnfContainerProfileData) { - *out = *in - if in.Storages != nil { - in, out := &in.Storages, &out.Storages - *out = make([]NnfContainerProfileStorage, len(*in)) - copy(*out, *in) - } - if in.PreRunTimeoutSeconds != nil { - in, out := &in.PreRunTimeoutSeconds, &out.PreRunTimeoutSeconds - *out = new(int64) - **out = **in - } - if in.PostRunTimeoutSeconds != nil { - in, out := &in.PostRunTimeoutSeconds, &out.PostRunTimeoutSeconds - *out = new(int64) - **out = **in - } - if in.UserID != nil { - in, out := &in.UserID, &out.UserID - *out = new(uint32) - **out = **in - } - if in.GroupID != nil { - in, out := &in.GroupID, &out.GroupID - *out = new(uint32) - **out = **in - } - if in.Spec != nil { - in, out := &in.Spec, &out.Spec - *out = new(v1.PodSpec) - (*in).DeepCopyInto(*out) - } - if in.MPISpec != nil { - in, out := &in.MPISpec, &out.MPISpec - *out = new(v2beta1.MPIJobSpec) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfContainerProfileData. -func (in *NnfContainerProfileData) DeepCopy() *NnfContainerProfileData { - if in == nil { - return nil - } - out := new(NnfContainerProfileData) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NnfContainerProfileList) DeepCopyInto(out *NnfContainerProfileList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NnfContainerProfile, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfContainerProfileList. -func (in *NnfContainerProfileList) DeepCopy() *NnfContainerProfileList { - if in == nil { - return nil - } - out := new(NnfContainerProfileList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NnfContainerProfileList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfContainerProfileStorage) DeepCopyInto(out *NnfContainerProfileStorage) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfContainerProfileStorage. -func (in *NnfContainerProfileStorage) DeepCopy() *NnfContainerProfileStorage { - if in == nil { - return nil - } - out := new(NnfContainerProfileStorage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfDataMovement) DeepCopyInto(out *NnfDataMovement) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovement. 
-func (in *NnfDataMovement) DeepCopy() *NnfDataMovement { - if in == nil { - return nil - } - out := new(NnfDataMovement) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NnfDataMovement) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfDataMovementCommandStatus) DeepCopyInto(out *NnfDataMovementCommandStatus) { - *out = *in - out.ElapsedTime = in.ElapsedTime - if in.ProgressPercentage != nil { - in, out := &in.ProgressPercentage, &out.ProgressPercentage - *out = new(int32) - **out = **in - } - in.LastMessageTime.DeepCopyInto(&out.LastMessageTime) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = new(int32) - **out = **in - } - if in.Directories != nil { - in, out := &in.Directories, &out.Directories - *out = new(int32) - **out = **in - } - if in.Files != nil { - in, out := &in.Files, &out.Files - *out = new(int32) - **out = **in - } - if in.Links != nil { - in, out := &in.Links, &out.Links - *out = new(int32) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementCommandStatus. -func (in *NnfDataMovementCommandStatus) DeepCopy() *NnfDataMovementCommandStatus { - if in == nil { - return nil - } - out := new(NnfDataMovementCommandStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NnfDataMovementConfig) DeepCopyInto(out *NnfDataMovementConfig) { - *out = *in - if in.Slots != nil { - in, out := &in.Slots, &out.Slots - *out = new(int) - **out = **in - } - if in.MaxSlots != nil { - in, out := &in.MaxSlots, &out.MaxSlots - *out = new(int) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementConfig. -func (in *NnfDataMovementConfig) DeepCopy() *NnfDataMovementConfig { - if in == nil { - return nil - } - out := new(NnfDataMovementConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfDataMovementList) DeepCopyInto(out *NnfDataMovementList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NnfDataMovement, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementList. -func (in *NnfDataMovementList) DeepCopy() *NnfDataMovementList { - if in == nil { - return nil - } - out := new(NnfDataMovementList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NnfDataMovementList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NnfDataMovementManager) DeepCopyInto(out *NnfDataMovementManager) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementManager. -func (in *NnfDataMovementManager) DeepCopy() *NnfDataMovementManager { - if in == nil { - return nil - } - out := new(NnfDataMovementManager) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NnfDataMovementManager) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfDataMovementManagerList) DeepCopyInto(out *NnfDataMovementManagerList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NnfDataMovementManager, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementManagerList. -func (in *NnfDataMovementManagerList) DeepCopy() *NnfDataMovementManagerList { - if in == nil { - return nil - } - out := new(NnfDataMovementManagerList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NnfDataMovementManagerList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NnfDataMovementManagerSpec) DeepCopyInto(out *NnfDataMovementManagerSpec) { - *out = *in - in.Selector.DeepCopyInto(&out.Selector) - in.Template.DeepCopyInto(&out.Template) - in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementManagerSpec. -func (in *NnfDataMovementManagerSpec) DeepCopy() *NnfDataMovementManagerSpec { - if in == nil { - return nil - } - out := new(NnfDataMovementManagerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfDataMovementManagerStatus) DeepCopyInto(out *NnfDataMovementManagerStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementManagerStatus. -func (in *NnfDataMovementManagerStatus) DeepCopy() *NnfDataMovementManagerStatus { - if in == nil { - return nil - } - out := new(NnfDataMovementManagerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfDataMovementProfile) DeepCopyInto(out *NnfDataMovementProfile) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Data = in.Data -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementProfile. -func (in *NnfDataMovementProfile) DeepCopy() *NnfDataMovementProfile { - if in == nil { - return nil - } - out := new(NnfDataMovementProfile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *NnfDataMovementProfile) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfDataMovementProfileData) DeepCopyInto(out *NnfDataMovementProfileData) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementProfileData. -func (in *NnfDataMovementProfileData) DeepCopy() *NnfDataMovementProfileData { - if in == nil { - return nil - } - out := new(NnfDataMovementProfileData) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfDataMovementProfileList) DeepCopyInto(out *NnfDataMovementProfileList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NnfDataMovementProfile, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementProfileList. -func (in *NnfDataMovementProfileList) DeepCopy() *NnfDataMovementProfileList { - if in == nil { - return nil - } - out := new(NnfDataMovementProfileList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NnfDataMovementProfileList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NnfDataMovementSpec) DeepCopyInto(out *NnfDataMovementSpec) { - *out = *in - if in.Source != nil { - in, out := &in.Source, &out.Source - *out = new(NnfDataMovementSpecSourceDestination) - **out = **in - } - if in.Destination != nil { - in, out := &in.Destination, &out.Destination - *out = new(NnfDataMovementSpecSourceDestination) - **out = **in - } - out.ProfileReference = in.ProfileReference - if in.UserConfig != nil { - in, out := &in.UserConfig, &out.UserConfig - *out = new(NnfDataMovementConfig) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementSpec. -func (in *NnfDataMovementSpec) DeepCopy() *NnfDataMovementSpec { - if in == nil { - return nil - } - out := new(NnfDataMovementSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfDataMovementSpecSourceDestination) DeepCopyInto(out *NnfDataMovementSpecSourceDestination) { - *out = *in - out.StorageReference = in.StorageReference -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementSpecSourceDestination. -func (in *NnfDataMovementSpecSourceDestination) DeepCopy() *NnfDataMovementSpecSourceDestination { - if in == nil { - return nil - } - out := new(NnfDataMovementSpecSourceDestination) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NnfDataMovementStatus) DeepCopyInto(out *NnfDataMovementStatus) { - *out = *in - if in.StartTime != nil { - in, out := &in.StartTime, &out.StartTime - *out = (*in).DeepCopy() - } - if in.EndTime != nil { - in, out := &in.EndTime, &out.EndTime - *out = (*in).DeepCopy() - } - if in.CommandStatus != nil { - in, out := &in.CommandStatus, &out.CommandStatus - *out = new(NnfDataMovementCommandStatus) - (*in).DeepCopyInto(*out) - } - in.ResourceError.DeepCopyInto(&out.ResourceError) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDataMovementStatus. -func (in *NnfDataMovementStatus) DeepCopy() *NnfDataMovementStatus { - if in == nil { - return nil - } - out := new(NnfDataMovementStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfDriveStatus) DeepCopyInto(out *NnfDriveStatus) { - *out = *in - out.NnfResourceStatus = in.NnfResourceStatus -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfDriveStatus. -func (in *NnfDriveStatus) DeepCopy() *NnfDriveStatus { - if in == nil { - return nil - } - out := new(NnfDriveStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfLustreMGT) DeepCopyInto(out *NnfLustreMGT) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfLustreMGT. 
-func (in *NnfLustreMGT) DeepCopy() *NnfLustreMGT { - if in == nil { - return nil - } - out := new(NnfLustreMGT) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NnfLustreMGT) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfLustreMGTList) DeepCopyInto(out *NnfLustreMGTList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NnfLustreMGT, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfLustreMGTList. -func (in *NnfLustreMGTList) DeepCopy() *NnfLustreMGTList { - if in == nil { - return nil - } - out := new(NnfLustreMGTList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NnfLustreMGTList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NnfLustreMGTSpec) DeepCopyInto(out *NnfLustreMGTSpec) { - *out = *in - if in.Addresses != nil { - in, out := &in.Addresses, &out.Addresses - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.FsNameBlackList != nil { - in, out := &in.FsNameBlackList, &out.FsNameBlackList - *out = make([]string, len(*in)) - copy(*out, *in) - } - out.FsNameStartReference = in.FsNameStartReference - if in.ClaimList != nil { - in, out := &in.ClaimList, &out.ClaimList - *out = make([]v1.ObjectReference, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfLustreMGTSpec. -func (in *NnfLustreMGTSpec) DeepCopy() *NnfLustreMGTSpec { - if in == nil { - return nil - } - out := new(NnfLustreMGTSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfLustreMGTStatus) DeepCopyInto(out *NnfLustreMGTStatus) { - *out = *in - if in.ClaimList != nil { - in, out := &in.ClaimList, &out.ClaimList - *out = make([]NnfLustreMGTStatusClaim, len(*in)) - copy(*out, *in) - } - in.ResourceError.DeepCopyInto(&out.ResourceError) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfLustreMGTStatus. -func (in *NnfLustreMGTStatus) DeepCopy() *NnfLustreMGTStatus { - if in == nil { - return nil - } - out := new(NnfLustreMGTStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfLustreMGTStatusClaim) DeepCopyInto(out *NnfLustreMGTStatusClaim) { - *out = *in - out.Reference = in.Reference -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfLustreMGTStatusClaim. 
-func (in *NnfLustreMGTStatusClaim) DeepCopy() *NnfLustreMGTStatusClaim { - if in == nil { - return nil - } - out := new(NnfLustreMGTStatusClaim) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfNode) DeepCopyInto(out *NnfNode) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNode. -func (in *NnfNode) DeepCopy() *NnfNode { - if in == nil { - return nil - } - out := new(NnfNode) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NnfNode) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfNodeBlockStorage) DeepCopyInto(out *NnfNodeBlockStorage) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorage. -func (in *NnfNodeBlockStorage) DeepCopy() *NnfNodeBlockStorage { - if in == nil { - return nil - } - out := new(NnfNodeBlockStorage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NnfNodeBlockStorage) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NnfNodeBlockStorageAccessStatus) DeepCopyInto(out *NnfNodeBlockStorageAccessStatus) { - *out = *in - if in.DevicePaths != nil { - in, out := &in.DevicePaths, &out.DevicePaths - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageAccessStatus. -func (in *NnfNodeBlockStorageAccessStatus) DeepCopy() *NnfNodeBlockStorageAccessStatus { - if in == nil { - return nil - } - out := new(NnfNodeBlockStorageAccessStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfNodeBlockStorageAllocationSpec) DeepCopyInto(out *NnfNodeBlockStorageAllocationSpec) { - *out = *in - if in.Access != nil { - in, out := &in.Access, &out.Access - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageAllocationSpec. -func (in *NnfNodeBlockStorageAllocationSpec) DeepCopy() *NnfNodeBlockStorageAllocationSpec { - if in == nil { - return nil - } - out := new(NnfNodeBlockStorageAllocationSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfNodeBlockStorageAllocationStatus) DeepCopyInto(out *NnfNodeBlockStorageAllocationStatus) { - *out = *in - if in.Accesses != nil { - in, out := &in.Accesses, &out.Accesses - *out = make(map[string]NnfNodeBlockStorageAccessStatus, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - if in.Devices != nil { - in, out := &in.Devices, &out.Devices - *out = make([]NnfNodeBlockStorageDeviceStatus, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageAllocationStatus. 
-func (in *NnfNodeBlockStorageAllocationStatus) DeepCopy() *NnfNodeBlockStorageAllocationStatus { - if in == nil { - return nil - } - out := new(NnfNodeBlockStorageAllocationStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfNodeBlockStorageDeviceStatus) DeepCopyInto(out *NnfNodeBlockStorageDeviceStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageDeviceStatus. -func (in *NnfNodeBlockStorageDeviceStatus) DeepCopy() *NnfNodeBlockStorageDeviceStatus { - if in == nil { - return nil - } - out := new(NnfNodeBlockStorageDeviceStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfNodeBlockStorageList) DeepCopyInto(out *NnfNodeBlockStorageList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NnfNodeBlockStorage, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageList. -func (in *NnfNodeBlockStorageList) DeepCopy() *NnfNodeBlockStorageList { - if in == nil { - return nil - } - out := new(NnfNodeBlockStorageList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NnfNodeBlockStorageList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NnfNodeBlockStorageSpec) DeepCopyInto(out *NnfNodeBlockStorageSpec) { - *out = *in - if in.Allocations != nil { - in, out := &in.Allocations, &out.Allocations - *out = make([]NnfNodeBlockStorageAllocationSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageSpec. -func (in *NnfNodeBlockStorageSpec) DeepCopy() *NnfNodeBlockStorageSpec { - if in == nil { - return nil - } - out := new(NnfNodeBlockStorageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfNodeBlockStorageStatus) DeepCopyInto(out *NnfNodeBlockStorageStatus) { - *out = *in - if in.Allocations != nil { - in, out := &in.Allocations, &out.Allocations - *out = make([]NnfNodeBlockStorageAllocationStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.ResourceError.DeepCopyInto(&out.ResourceError) - in.PodStartTime.DeepCopyInto(&out.PodStartTime) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageStatus. -func (in *NnfNodeBlockStorageStatus) DeepCopy() *NnfNodeBlockStorageStatus { - if in == nil { - return nil - } - out := new(NnfNodeBlockStorageStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfNodeECData) DeepCopyInto(out *NnfNodeECData) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeECData. 
-func (in *NnfNodeECData) DeepCopy() *NnfNodeECData { - if in == nil { - return nil - } - out := new(NnfNodeECData) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NnfNodeECData) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfNodeECDataList) DeepCopyInto(out *NnfNodeECDataList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NnfNodeECData, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeECDataList. -func (in *NnfNodeECDataList) DeepCopy() *NnfNodeECDataList { - if in == nil { - return nil - } - out := new(NnfNodeECDataList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NnfNodeECDataList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfNodeECDataSpec) DeepCopyInto(out *NnfNodeECDataSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeECDataSpec. -func (in *NnfNodeECDataSpec) DeepCopy() *NnfNodeECDataSpec { - if in == nil { - return nil - } - out := new(NnfNodeECDataSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NnfNodeECDataStatus) DeepCopyInto(out *NnfNodeECDataStatus) { - *out = *in - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make(map[string]NnfNodeECPrivateData, len(*in)) - for key, val := range *in { - var outVal map[string]string - if val == nil { - (*out)[key] = nil - } else { - inVal := (*in)[key] - in, out := &inVal, &outVal - *out = make(NnfNodeECPrivateData, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - (*out)[key] = outVal - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeECDataStatus. -func (in *NnfNodeECDataStatus) DeepCopy() *NnfNodeECDataStatus { - if in == nil { - return nil - } - out := new(NnfNodeECDataStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in NnfNodeECPrivateData) DeepCopyInto(out *NnfNodeECPrivateData) { - { - in := &in - *out = make(NnfNodeECPrivateData, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeECPrivateData. -func (in NnfNodeECPrivateData) DeepCopy() NnfNodeECPrivateData { - if in == nil { - return nil - } - out := new(NnfNodeECPrivateData) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfNodeList) DeepCopyInto(out *NnfNodeList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NnfNode, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeList. 
-func (in *NnfNodeList) DeepCopy() *NnfNodeList { - if in == nil { - return nil - } - out := new(NnfNodeList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NnfNodeList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfNodeSpec) DeepCopyInto(out *NnfNodeSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeSpec. -func (in *NnfNodeSpec) DeepCopy() *NnfNodeSpec { - if in == nil { - return nil - } - out := new(NnfNodeSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfNodeStatus) DeepCopyInto(out *NnfNodeStatus) { - *out = *in - if in.Servers != nil { - in, out := &in.Servers, &out.Servers - *out = make([]NnfServerStatus, len(*in)) - copy(*out, *in) - } - if in.Drives != nil { - in, out := &in.Drives, &out.Drives - *out = make([]NnfDriveStatus, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeStatus. -func (in *NnfNodeStatus) DeepCopy() *NnfNodeStatus { - if in == nil { - return nil - } - out := new(NnfNodeStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfNodeStorage) DeepCopyInto(out *NnfNodeStorage) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeStorage. 
-func (in *NnfNodeStorage) DeepCopy() *NnfNodeStorage { - if in == nil { - return nil - } - out := new(NnfNodeStorage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NnfNodeStorage) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfNodeStorageAllocationStatus) DeepCopyInto(out *NnfNodeStorageAllocationStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeStorageAllocationStatus. -func (in *NnfNodeStorageAllocationStatus) DeepCopy() *NnfNodeStorageAllocationStatus { - if in == nil { - return nil - } - out := new(NnfNodeStorageAllocationStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfNodeStorageList) DeepCopyInto(out *NnfNodeStorageList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NnfNodeStorage, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeStorageList. -func (in *NnfNodeStorageList) DeepCopy() *NnfNodeStorageList { - if in == nil { - return nil - } - out := new(NnfNodeStorageList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *NnfNodeStorageList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfNodeStorageSpec) DeepCopyInto(out *NnfNodeStorageSpec) { - *out = *in - out.LustreStorage = in.LustreStorage - out.BlockReference = in.BlockReference -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeStorageSpec. -func (in *NnfNodeStorageSpec) DeepCopy() *NnfNodeStorageSpec { - if in == nil { - return nil - } - out := new(NnfNodeStorageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfNodeStorageStatus) DeepCopyInto(out *NnfNodeStorageStatus) { - *out = *in - if in.Allocations != nil { - in, out := &in.Allocations, &out.Allocations - *out = make([]NnfNodeStorageAllocationStatus, len(*in)) - copy(*out, *in) - } - in.ResourceError.DeepCopyInto(&out.ResourceError) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeStorageStatus. -func (in *NnfNodeStorageStatus) DeepCopy() *NnfNodeStorageStatus { - if in == nil { - return nil - } - out := new(NnfNodeStorageStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfPortManager) DeepCopyInto(out *NnfPortManager) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManager. 
-func (in *NnfPortManager) DeepCopy() *NnfPortManager { - if in == nil { - return nil - } - out := new(NnfPortManager) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NnfPortManager) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfPortManagerAllocationSpec) DeepCopyInto(out *NnfPortManagerAllocationSpec) { - *out = *in - out.Requester = in.Requester -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManagerAllocationSpec. -func (in *NnfPortManagerAllocationSpec) DeepCopy() *NnfPortManagerAllocationSpec { - if in == nil { - return nil - } - out := new(NnfPortManagerAllocationSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfPortManagerAllocationStatus) DeepCopyInto(out *NnfPortManagerAllocationStatus) { - *out = *in - if in.Requester != nil { - in, out := &in.Requester, &out.Requester - *out = new(v1.ObjectReference) - **out = **in - } - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]uint16, len(*in)) - copy(*out, *in) - } - if in.TimeUnallocated != nil { - in, out := &in.TimeUnallocated, &out.TimeUnallocated - *out = (*in).DeepCopy() - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManagerAllocationStatus. -func (in *NnfPortManagerAllocationStatus) DeepCopy() *NnfPortManagerAllocationStatus { - if in == nil { - return nil - } - out := new(NnfPortManagerAllocationStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *NnfPortManagerList) DeepCopyInto(out *NnfPortManagerList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NnfPortManager, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManagerList. -func (in *NnfPortManagerList) DeepCopy() *NnfPortManagerList { - if in == nil { - return nil - } - out := new(NnfPortManagerList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NnfPortManagerList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfPortManagerSpec) DeepCopyInto(out *NnfPortManagerSpec) { - *out = *in - out.SystemConfiguration = in.SystemConfiguration - if in.Allocations != nil { - in, out := &in.Allocations, &out.Allocations - *out = make([]NnfPortManagerAllocationSpec, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManagerSpec. -func (in *NnfPortManagerSpec) DeepCopy() *NnfPortManagerSpec { - if in == nil { - return nil - } - out := new(NnfPortManagerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NnfPortManagerStatus) DeepCopyInto(out *NnfPortManagerStatus) { - *out = *in - if in.Allocations != nil { - in, out := &in.Allocations, &out.Allocations - *out = make([]NnfPortManagerAllocationStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfPortManagerStatus. -func (in *NnfPortManagerStatus) DeepCopy() *NnfPortManagerStatus { - if in == nil { - return nil - } - out := new(NnfPortManagerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfResourceStatus) DeepCopyInto(out *NnfResourceStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfResourceStatus. -func (in *NnfResourceStatus) DeepCopy() *NnfResourceStatus { - if in == nil { - return nil - } - out := new(NnfResourceStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfServerStatus) DeepCopyInto(out *NnfServerStatus) { - *out = *in - out.NnfResourceStatus = in.NnfResourceStatus -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfServerStatus. -func (in *NnfServerStatus) DeepCopy() *NnfServerStatus { - if in == nil { - return nil - } - out := new(NnfServerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NnfStorage) DeepCopyInto(out *NnfStorage) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorage. -func (in *NnfStorage) DeepCopy() *NnfStorage { - if in == nil { - return nil - } - out := new(NnfStorage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NnfStorage) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfStorageAllocationNodes) DeepCopyInto(out *NnfStorageAllocationNodes) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageAllocationNodes. -func (in *NnfStorageAllocationNodes) DeepCopy() *NnfStorageAllocationNodes { - if in == nil { - return nil - } - out := new(NnfStorageAllocationNodes) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfStorageAllocationSetSpec) DeepCopyInto(out *NnfStorageAllocationSetSpec) { - *out = *in - out.NnfStorageLustreSpec = in.NnfStorageLustreSpec - if in.Nodes != nil { - in, out := &in.Nodes, &out.Nodes - *out = make([]NnfStorageAllocationNodes, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageAllocationSetSpec. 
-func (in *NnfStorageAllocationSetSpec) DeepCopy() *NnfStorageAllocationSetSpec { - if in == nil { - return nil - } - out := new(NnfStorageAllocationSetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfStorageAllocationSetStatus) DeepCopyInto(out *NnfStorageAllocationSetStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageAllocationSetStatus. -func (in *NnfStorageAllocationSetStatus) DeepCopy() *NnfStorageAllocationSetStatus { - if in == nil { - return nil - } - out := new(NnfStorageAllocationSetStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfStorageList) DeepCopyInto(out *NnfStorageList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NnfStorage, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageList. -func (in *NnfStorageList) DeepCopy() *NnfStorageList { - if in == nil { - return nil - } - out := new(NnfStorageList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NnfStorageList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NnfStorageLustreSpec) DeepCopyInto(out *NnfStorageLustreSpec) { - *out = *in - out.PersistentMgsReference = in.PersistentMgsReference -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageLustreSpec. -func (in *NnfStorageLustreSpec) DeepCopy() *NnfStorageLustreSpec { - if in == nil { - return nil - } - out := new(NnfStorageLustreSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfStorageLustreStatus) DeepCopyInto(out *NnfStorageLustreStatus) { - *out = *in - out.LustreMgtReference = in.LustreMgtReference -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageLustreStatus. -func (in *NnfStorageLustreStatus) DeepCopy() *NnfStorageLustreStatus { - if in == nil { - return nil - } - out := new(NnfStorageLustreStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfStorageProfile) DeepCopyInto(out *NnfStorageProfile) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Data.DeepCopyInto(&out.Data) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfile. -func (in *NnfStorageProfile) DeepCopy() *NnfStorageProfile { - if in == nil { - return nil - } - out := new(NnfStorageProfile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NnfStorageProfile) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NnfStorageProfileCmdLines) DeepCopyInto(out *NnfStorageProfileCmdLines) { - *out = *in - out.VgChange = in.VgChange - out.LvChange = in.LvChange -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileCmdLines. -func (in *NnfStorageProfileCmdLines) DeepCopy() *NnfStorageProfileCmdLines { - if in == nil { - return nil - } - out := new(NnfStorageProfileCmdLines) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfStorageProfileData) DeepCopyInto(out *NnfStorageProfileData) { - *out = *in - in.LustreStorage.DeepCopyInto(&out.LustreStorage) - in.GFS2Storage.DeepCopyInto(&out.GFS2Storage) - in.XFSStorage.DeepCopyInto(&out.XFSStorage) - in.RawStorage.DeepCopyInto(&out.RawStorage) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileData. -func (in *NnfStorageProfileData) DeepCopy() *NnfStorageProfileData { - if in == nil { - return nil - } - out := new(NnfStorageProfileData) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfStorageProfileGFS2Data) DeepCopyInto(out *NnfStorageProfileGFS2Data) { - *out = *in - out.CmdLines = in.CmdLines - if in.StorageLabels != nil { - in, out := &in.StorageLabels, &out.StorageLabels - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileGFS2Data. -func (in *NnfStorageProfileGFS2Data) DeepCopy() *NnfStorageProfileGFS2Data { - if in == nil { - return nil - } - out := new(NnfStorageProfileGFS2Data) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NnfStorageProfileLVMLvChangeCmdLines) DeepCopyInto(out *NnfStorageProfileLVMLvChangeCmdLines) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileLVMLvChangeCmdLines. -func (in *NnfStorageProfileLVMLvChangeCmdLines) DeepCopy() *NnfStorageProfileLVMLvChangeCmdLines { - if in == nil { - return nil - } - out := new(NnfStorageProfileLVMLvChangeCmdLines) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfStorageProfileLVMVgChangeCmdLines) DeepCopyInto(out *NnfStorageProfileLVMVgChangeCmdLines) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileLVMVgChangeCmdLines. -func (in *NnfStorageProfileLVMVgChangeCmdLines) DeepCopy() *NnfStorageProfileLVMVgChangeCmdLines { - if in == nil { - return nil - } - out := new(NnfStorageProfileLVMVgChangeCmdLines) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfStorageProfileList) DeepCopyInto(out *NnfStorageProfileList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NnfStorageProfile, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileList. -func (in *NnfStorageProfileList) DeepCopy() *NnfStorageProfileList { - if in == nil { - return nil - } - out := new(NnfStorageProfileList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *NnfStorageProfileList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfStorageProfileLustreCmdLines) DeepCopyInto(out *NnfStorageProfileLustreCmdLines) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileLustreCmdLines. -func (in *NnfStorageProfileLustreCmdLines) DeepCopy() *NnfStorageProfileLustreCmdLines { - if in == nil { - return nil - } - out := new(NnfStorageProfileLustreCmdLines) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfStorageProfileLustreData) DeepCopyInto(out *NnfStorageProfileLustreData) { - *out = *in - out.MgtCmdLines = in.MgtCmdLines - out.MdtCmdLines = in.MdtCmdLines - out.MgtMdtCmdLines = in.MgtMdtCmdLines - out.OstCmdLines = in.OstCmdLines - in.MgtOptions.DeepCopyInto(&out.MgtOptions) - in.MdtOptions.DeepCopyInto(&out.MdtOptions) - in.MgtMdtOptions.DeepCopyInto(&out.MgtMdtOptions) - in.OstOptions.DeepCopyInto(&out.OstOptions) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileLustreData. -func (in *NnfStorageProfileLustreData) DeepCopy() *NnfStorageProfileLustreData { - if in == nil { - return nil - } - out := new(NnfStorageProfileLustreData) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NnfStorageProfileLustreMiscOptions) DeepCopyInto(out *NnfStorageProfileLustreMiscOptions) { - *out = *in - if in.StorageLabels != nil { - in, out := &in.StorageLabels, &out.StorageLabels - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileLustreMiscOptions. -func (in *NnfStorageProfileLustreMiscOptions) DeepCopy() *NnfStorageProfileLustreMiscOptions { - if in == nil { - return nil - } - out := new(NnfStorageProfileLustreMiscOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfStorageProfileRawData) DeepCopyInto(out *NnfStorageProfileRawData) { - *out = *in - out.CmdLines = in.CmdLines - if in.StorageLabels != nil { - in, out := &in.StorageLabels, &out.StorageLabels - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileRawData. -func (in *NnfStorageProfileRawData) DeepCopy() *NnfStorageProfileRawData { - if in == nil { - return nil - } - out := new(NnfStorageProfileRawData) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfStorageProfileXFSData) DeepCopyInto(out *NnfStorageProfileXFSData) { - *out = *in - out.CmdLines = in.CmdLines - if in.StorageLabels != nil { - in, out := &in.StorageLabels, &out.StorageLabels - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileXFSData. 
-func (in *NnfStorageProfileXFSData) DeepCopy() *NnfStorageProfileXFSData { - if in == nil { - return nil - } - out := new(NnfStorageProfileXFSData) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfStorageSpec) DeepCopyInto(out *NnfStorageSpec) { - *out = *in - if in.AllocationSets != nil { - in, out := &in.AllocationSets, &out.AllocationSets - *out = make([]NnfStorageAllocationSetSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageSpec. -func (in *NnfStorageSpec) DeepCopy() *NnfStorageSpec { - if in == nil { - return nil - } - out := new(NnfStorageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfStorageStatus) DeepCopyInto(out *NnfStorageStatus) { - *out = *in - out.NnfStorageLustreStatus = in.NnfStorageLustreStatus - if in.AllocationSets != nil { - in, out := &in.AllocationSets, &out.AllocationSets - *out = make([]NnfStorageAllocationSetStatus, len(*in)) - copy(*out, *in) - } - in.ResourceError.DeepCopyInto(&out.ResourceError) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageStatus. -func (in *NnfStorageStatus) DeepCopy() *NnfStorageStatus { - if in == nil { - return nil - } - out := new(NnfStorageStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NnfSystemStorage) DeepCopyInto(out *NnfSystemStorage) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfSystemStorage. -func (in *NnfSystemStorage) DeepCopy() *NnfSystemStorage { - if in == nil { - return nil - } - out := new(NnfSystemStorage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NnfSystemStorage) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfSystemStorageList) DeepCopyInto(out *NnfSystemStorageList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NnfSystemStorage, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfSystemStorageList. -func (in *NnfSystemStorageList) DeepCopy() *NnfSystemStorageList { - if in == nil { - return nil - } - out := new(NnfSystemStorageList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NnfSystemStorageList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NnfSystemStorageSpec) DeepCopyInto(out *NnfSystemStorageSpec) { - *out = *in - out.SystemConfiguration = in.SystemConfiguration - if in.ExcludeRabbits != nil { - in, out := &in.ExcludeRabbits, &out.ExcludeRabbits - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.IncludeRabbits != nil { - in, out := &in.IncludeRabbits, &out.IncludeRabbits - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ExcludeComputes != nil { - in, out := &in.ExcludeComputes, &out.ExcludeComputes - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.IncludeComputes != nil { - in, out := &in.IncludeComputes, &out.IncludeComputes - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ComputesPattern != nil { - in, out := &in.ComputesPattern, &out.ComputesPattern - *out = make([]int, len(*in)) - copy(*out, *in) - } - out.StorageProfile = in.StorageProfile -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfSystemStorageSpec. -func (in *NnfSystemStorageSpec) DeepCopy() *NnfSystemStorageSpec { - if in == nil { - return nil - } - out := new(NnfSystemStorageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfSystemStorageStatus) DeepCopyInto(out *NnfSystemStorageStatus) { - *out = *in - in.ResourceError.DeepCopyInto(&out.ResourceError) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfSystemStorageStatus. 
-func (in *NnfSystemStorageStatus) DeepCopy() *NnfSystemStorageStatus { - if in == nil { - return nil - } - out := new(NnfSystemStorageStatus) - in.DeepCopyInto(out) - return out -} diff --git a/cmd/main.go b/cmd/main.go index 75f9b3c8..3a706624 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -50,8 +50,6 @@ import ( mpiv2beta1 "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" - controllers "github.com/NearNodeFlash/nnf-sos/internal/controller" nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" @@ -76,7 +74,6 @@ const ( func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - utilruntime.Must(nnfv1alpha1.AddToScheme(scheme)) utilruntime.Must(dwsv1alpha2.AddToScheme(scheme)) utilruntime.Must(lusv1beta1.AddToScheme(scheme)) diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfaccesses.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfaccesses.yaml index e070bfdf..c9c82df9 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfaccesses.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfaccesses.yaml @@ -14,256 +14,6 @@ spec: singular: nnfaccess scope: Namespaced versions: - - additionalPrinterColumns: - - description: The desired state - jsonPath: .spec.desiredState - name: DESIREDSTATE - type: string - - description: The current state - jsonPath: .status.state - name: STATE - type: string - - description: Whether the state has been achieved - jsonPath: .status.ready - name: READY - type: boolean - - jsonPath: .status.error.severity - name: ERROR - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: NnfAccess is the Schema for the nnfaccesses API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: NnfAccessSpec defines the desired state of NnfAccess - properties: - clientReference: - description: |- - ClientReference is for a client resource. (DWS) Computes is the only client - resource type currently supported - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. - TODO: this design is not final and this field is subject to change in the future. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic - desiredState: - description: DesiredState is the desired state for the mounts on the - client - enum: - - mounted - - unmounted - type: string - groupID: - description: GroupID for the new mount. Currently only used for raw - format: int32 - type: integer - makeClientMounts: - default: true - description: |- - MakeClientMounts determines whether the ClientMount resources are made, or if only - the access list on the NnfNodeBlockStorage is updated - type: boolean - mountPath: - description: MountPath for the storage target on the client - type: string - mountPathPrefix: - type: string - storageReference: - description: StorageReference is the NnfStorage reference - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. 
- TODO: this design is not final and this field is subject to change in the future. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic - target: - description: |- - Target specifies which storage targets the client should mount - - single: Only one of the storage the client can access - - all: All of the storage the client can access - - shared: Multiple clients access the same storage - enum: - - single - - all - - shared - type: string - teardownState: - allOf: - - enum: - - Proposal - - Setup - - DataIn - - PreRun - - PostRun - - DataOut - - Teardown - - enum: - - PreRun - - PostRun - - Teardown - description: |- - TeardownState is the desired state of the workflow for this NNF Access resource to - be torn down and deleted. - type: string - userID: - description: UserID for the new mount. 
Currently only used for raw - format: int32 - type: integer - required: - - desiredState - - groupID - - makeClientMounts - - storageReference - - target - - teardownState - - userID - type: object - status: - description: NnfAccessStatus defines the observed state of NnfAccess - properties: - error: - description: Error information - properties: - debugMessage: - description: Internal debug message for the error - type: string - severity: - description: |- - Indication of how severe the error is. Minor will likely succeed, Major may - succeed, and Fatal will never succeed. - enum: - - Minor - - Major - - Fatal - type: string - type: - description: Internal or user error - enum: - - Internal - - User - - WLM - type: string - userMessage: - description: Optional user facing message if the error is relevant - to an end user - type: string - required: - - debugMessage - - severity - - type - type: object - ready: - description: Ready signifies whether status.state has been achieved - type: boolean - state: - description: State is the current state - enum: - - mounted - - unmounted - type: string - required: - - ready - - state - type: object - type: object - served: false - storage: false - subresources: - status: {} - additionalPrinterColumns: - description: The desired state jsonPath: .spec.desiredState diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfcontainerprofiles.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfcontainerprofiles.yaml index 41a62995..a2f61565 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfcontainerprofiles.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfcontainerprofiles.yaml @@ -14,14851 +14,6 @@ spec: singular: nnfcontainerprofile scope: Namespaced versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: NnfContainerProfile is the Schema for the nnfcontainerprofiles - API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. 
- Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - data: - description: NnfContainerProfileSpec defines the desired state of NnfContainerProfile - properties: - groupID: - description: |- - GroupID specifies the group ID that is allowed to use this profile. If this is specified, - only Workflows that have a matching group ID can select this profile. - format: int32 - type: integer - mpiSpec: - description: |- - MPIJobSpec to define the MPI containers created from this profile. This functionality is - provided via mpi-operator, a 3rd party tool to assist in running MPI applications across - worker containers. - Either this or Spec must be provided, but not both. - - - All the fields defined drive mpi-operator behavior. See the type definition of MPISpec for - more detail: - https://github.com/kubeflow/mpi-operator/blob/v0.4.0/pkg/apis/kubeflow/v2beta1/types.go#L137 - - - Note: most of these fields are fully customizable with a few exceptions. These fields are - overridden by NNF software to ensure proper behavior to interface with the DWS workflow - - Replicas - - RunPolicy.BackoffLimit (this is set above by `RetryLimit`) - - Worker/Launcher.RestartPolicy - properties: - mpiImplementation: - default: OpenMPI - description: |- - MPIImplementation is the MPI implementation. - Options are "OpenMPI" (default) and "Intel". - enum: - - OpenMPI - - Intel - type: string - mpiReplicaSpecs: - additionalProperties: - description: ReplicaSpec is a description of the replica - properties: - replicas: - description: |- - Replicas is the desired number of replicas of the given template. - If unspecified, defaults to 1. - format: int32 - type: integer - restartPolicy: - description: |- - Restart policy for all replicas within the job. - One of Always, OnFailure, Never and ExitCode. 
- Default to Never. - type: string - template: - description: |- - Template is the object that describes the pod that - will be created for this replica. RestartPolicy in PodTemplateSpec - will be overide by RestartPolicy in ReplicaSpec - properties: - metadata: - description: |- - Standard object's metadata. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - type: object - spec: - description: |- - Specification of the desired behavior of the pod. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - activeDeadlineSeconds: - description: |- - Optional duration in seconds the pod may be active on the node relative to - StartTime before the system will actively try to mark it failed and kill associated containers. - Value must be a positive integer. - format: int64 - type: integer - affinity: - description: If specified, the pod's scheduling - constraints - properties: - nodeAffinity: - description: Describes node affinity scheduling - rules for the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node matches the corresponding matchExpressions; the - node(s) with the highest sum are the most preferred. - items: - description: |- - An empty preferred scheduling term matches all objects with implicit weight 0 - (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. 
is also a no-op). - properties: - preference: - description: A node selector term, - associated with the corresponding - weight. - properties: - matchExpressions: - description: A list of node selector - requirements by node's labels. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key - that the selector applies - to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector - requirements by node's fields. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key - that the selector applies - to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. 
- This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - x-kubernetes-map-type: atomic - weight: - description: Weight associated with - matching the corresponding nodeSelectorTerm, - in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to an update), the system - may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node - selector terms. The terms are ORed. - items: - description: |- - A null or empty node selector term matches no objects. The requirements of - them are ANDed. - The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector - requirements by node's labels. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key - that the selector applies - to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. 
- This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector - requirements by node's fields. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key - that the selector applies - to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - x-kubernetes-map-type: atomic - type: array - required: - - nodeSelectorTerms - type: object - x-kubernetes-map-type: atomic - type: object - podAffinity: - description: Describes pod affinity scheduling - rules (e.g. co-locate this pod in the same - node, zone, etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. 
- for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the - matched WeightedPodAffinityTerm fields - are added per-node to find the most - preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity - term, associated with the corresponding - weight. - properties: - labelSelector: - description: A label query over - a set of resources, in this - case pods. - properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is - the label key that - the selector applies - to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". 
The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is - the label key that - the selector applies - to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. 
- The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a - set of resources, in this case pods. 
- properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the - label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. 
- properties: - key: - description: key is the - label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling - rules (e.g. 
avoid putting this pod in the - same node, zone, etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the anti-affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the - matched WeightedPodAffinityTerm fields - are added per-node to find the most - preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity - term, associated with the corresponding - weight. - properties: - labelSelector: - description: A label query over - a set of resources, in this - case pods. - properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is - the label key that - the selector applies - to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. 
This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is - the label key that - the selector applies - to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the anti-affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the anti-affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. 
- items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a - set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the - label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. 
- The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the - label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
- items: - type: string - type: array - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - automountServiceAccountToken: - description: AutomountServiceAccountToken indicates - whether a service account token should be automatically - mounted. - type: boolean - containers: - description: |- - List of containers belonging to the pod. - Containers cannot currently be added or removed. - There must be at least one container in a Pod. - Cannot be updated. - items: - description: A single application container that - you want to run within a pod. - properties: - args: - description: |- - Arguments to the entrypoint. - The container image's CMD is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot be updated. - More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - items: - type: string - type: array - command: - description: |- - Entrypoint array. Not executed within a shell. - The container image's ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable - cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot be updated. - More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - items: - type: string - type: array - env: - description: |- - List of environment variables to set in the container. - Cannot be updated. - items: - description: EnvVar represents an environment - variable present in a Container. - properties: - name: - description: Name of the environment - variable. Must be a C_IDENTIFIER. - type: string - value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". - type: string - valueFrom: - description: Source for the environment - variable's value. Cannot be used if - value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a - ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? 
- type: string - optional: - description: Specify whether - the ConfigMap or its key must - be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - properties: - apiVersion: - description: Version of the - schema the FieldPath is written - in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field - to select in the specified - API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - properties: - containerName: - description: 'Container name: - required for volumes, optional - for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output - format of the exposed resources, - defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource - to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a - secret in the pod's namespace - properties: - key: - description: The key of the - secret to select from. Must - be a valid secret key. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? 
- type: string - optional: - description: Specify whether - the Secret or its key must - be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - envFrom: - description: |- - List of sources to populate environment variables in the container. - The keys defined within a source must be a C_IDENTIFIER. All invalid keys - will be reported as an event when the container is starting. When a key exists in multiple - sources, the value associated with the last source will take precedence. - Values defined by an Env with a duplicate key will take precedence. - Cannot be updated. - items: - description: EnvFromSource represents the - source of a set of ConfigMaps - properties: - configMapRef: - description: The ConfigMap to select - from - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the - ConfigMap must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - prefix: - description: An optional identifier - to prepend to each key in the ConfigMap. - Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the - Secret must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - type: object - type: array - image: - description: |- - Container image name. 
- More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config management to default or override - container images in workload controllers like Deployments and StatefulSets. - type: string - imagePullPolicy: - description: |- - Image pull policy. - One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/containers/images#updating-images - type: string - lifecycle: - description: |- - Actions that the management system should take in response to container lifecycle events. - Cannot be updated. - properties: - postStart: - description: |- - PostStart is called immediately after a container is created. If the handler fails, - the container is terminated and restarted according to its restart policy. - Other management of the container blocks until the hook completes. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - properties: - exec: - description: Exec specifies the action - to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the - http request to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to - set in the request. HTTP allows - repeated headers. 
- items: - description: HTTPHeader describes - a custom header to be used - in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header - field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on - the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: |- - Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. - properties: - host: - description: 'Optional: Host name - to connect to, defaults to the - pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - description: |- - PreStop is called immediately before a container is terminated due to an - API request or management event such as liveness/startup probe failure, - preemption, resource contention, etc. The handler is not called if the - container crashes or exits. The Pod's termination grace period countdown begins before the - PreStop hook is executed. 
Regardless of the outcome of the handler, the - container will eventually terminate within the Pod's termination grace - period (unless delayed by finalizers). Other management of the container blocks until the hook completes - or until the termination grace period is reached. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - properties: - exec: - description: Exec specifies the action - to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the - http request to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to - set in the request. HTTP allows - repeated headers. - items: - description: HTTPHeader describes - a custom header to be used - in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header - field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on - the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. 
- x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: |- - Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. - properties: - host: - description: 'Optional: Host name - to connect to, defaults to the - pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - description: |- - Periodic probe of container liveness. - Container will be restarted if the probe fails. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies the action - to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action - involving a GRPC port. 
- properties: - port: - description: Port number of the gRPC - service. Number must be in the range - 1 to 65535. - format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http - request to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set - in the request. HTTP allows repeated - headers. - items: - description: HTTPHeader describes - a custom header to be used in - HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field - value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the - HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. 
- Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action - involving a TCP port. - properties: - host: - description: 'Optional: Host name - to connect to, defaults to the pod - IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. 
- More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - name: - description: |- - Name of the container specified as a DNS_LABEL. - Each container in a pod must have a unique name (DNS_LABEL). - Cannot be updated. - type: string - ports: - description: |- - List of ports to expose from the container. Not specifying a port here - DOES NOT prevent that port from being exposed. Any port which is - listening on the default "0.0.0.0" address inside a container will be - accessible from the network. - Modifying this array with strategic merge patch may corrupt the data. - For more information See https://github.com/kubernetes/kubernetes/issues/108255. - Cannot be updated. - items: - description: ContainerPort represents a - network port in a single container. - properties: - containerPort: - description: |- - Number of port to expose on the pod's IP address. - This must be a valid port number, 0 < x < 65536. - format: int32 - type: integer - hostIP: - description: What host IP to bind the - external port to. - type: string - hostPort: - description: |- - Number of port to expose on the host. - If specified, this must be a valid port number, 0 < x < 65536. - If HostNetwork is specified, this must match ContainerPort. - Most containers do not need this. - format: int32 - type: integer - name: - description: |- - If specified, this must be an IANA_SVC_NAME and unique within the pod. Each - named port in a pod must have a unique name. Name for the port that can be - referred to by services. - type: string - protocol: - default: TCP - description: |- - Protocol for port. Must be UDP, TCP, or SCTP. - Defaults to "TCP". - type: string - required: - - containerPort - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - description: |- - Periodic probe of container service readiness. 
- Container will be removed from service endpoints if the probe fails. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies the action - to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action - involving a GRPC port. - properties: - port: - description: Port number of the gRPC - service. Number must be in the range - 1 to 65535. - format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http - request to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set - in the request. HTTP allows repeated - headers. - items: - description: HTTPHeader describes - a custom header to be used in - HTTP probes - properties: - name: - description: |- - The header field name. 
- This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field - value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the - HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action - involving a TCP port. - properties: - host: - description: 'Optional: Host name - to connect to, defaults to the pod - IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
- The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - resizePolicy: - description: Resources resize policy for the - container. - items: - description: ContainerResizePolicy represents - resource resize policy for the container. - properties: - resourceName: - description: |- - Name of the resource to which this resource resize policy applies. - Supported values: cpu, memory. - type: string - restartPolicy: - description: |- - Restart policy to apply when specified resource is resized. - If not specified, it defaults to NotRequired. - type: string - required: - - resourceName - - restartPolicy - type: object - type: array - x-kubernetes-list-type: atomic - resources: - description: |- - Compute Resources required by this container. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. 
- - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references - one entry in PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - restartPolicy: - description: |- - RestartPolicy defines the restart behavior of individual containers in a pod. - This field may only be set for init containers, and the only allowed value is "Always". - For non-init containers or when this field is not specified, - the restart behavior is defined by the Pod's restart policy and the container type. 
- Setting the RestartPolicy as "Always" for the init container will have the following effect: - this init container will be continually restarted on - exit until all regular containers have terminated. Once all regular - containers have completed, all init containers with restartPolicy "Always" - will be shut down. This lifecycle differs from normal init containers and - is often referred to as a "sidecar" container. Although this init - container still starts in the init container sequence, it does not wait - for the container to complete before proceeding to the next init - container. Instead, the next init container starts immediately after this - init container is started, or after any startupProbe has successfully - completed. - type: string - securityContext: - description: |- - SecurityContext defines the security options the container should be run with. - If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. - More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - properties: - allowPrivilegeEscalation: - description: |- - AllowPrivilegeEscalation controls whether a process can gain more - privileges than its parent process. This bool directly controls if - the no_new_privs flag will be set on the container process. - AllowPrivilegeEscalation is true always when the container is: - 1) run as Privileged - 2) has CAP_SYS_ADMIN - Note that this field cannot be set when spec.os.name is windows. - type: boolean - capabilities: - description: |- - The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the container runtime. - Note that this field cannot be set when spec.os.name is windows. 
- properties: - add: - description: Added capabilities - items: - description: Capability represent - POSIX capabilities type - type: string - type: array - drop: - description: Removed capabilities - items: - description: Capability represent - POSIX capabilities type - type: string - type: array - type: object - privileged: - description: |- - Run container in privileged mode. - Processes in privileged containers are essentially equivalent to root on the host. - Defaults to false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - procMount: - description: |- - procMount denotes the type of proc mount to use for the containers. - The default is DefaultProcMount which uses the container runtime defaults for - readonly paths and masked paths. - This requires the ProcMountType feature flag to be enabled. - Note that this field cannot be set when spec.os.name is windows. - type: string - readOnlyRootFilesystem: - description: |- - Whether this container has a read-only root filesystem. - Default is false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - runAsGroup: - description: |- - The GID to run the entrypoint of the container process. - Uses runtime default if unset. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - runAsNonRoot: - description: |- - Indicates that the container must run as a non-root user. - If true, the Kubelet will validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start the container if it does. - If unset or false, no such validation will be performed. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. 
- type: boolean - runAsUser: - description: |- - The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - seLinuxOptions: - description: |- - The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random SELinux context for each - container. May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - properties: - level: - description: Level is SELinux level - label that applies to the container. - type: string - role: - description: Role is a SELinux role - label that applies to the container. - type: string - type: - description: Type is a SELinux type - label that applies to the container. - type: string - user: - description: User is a SELinux user - label that applies to the container. - type: string - type: object - seccompProfile: - description: |- - The seccomp options to use by this container. If seccomp options are - provided at both the pod & container level, the container options - override the pod options. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. - The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. - type: string - type: - description: |- - type indicates which kind of seccomp profile will be applied. 
- Valid options are: - - - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. - type: string - required: - - type - type: object - windowsOptions: - description: |- - The Windows specific settings applied to all containers. - If unspecified, the options from the PodSecurityContext will be used. - If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is linux. - properties: - gmsaCredentialSpec: - description: |- - GMSACredentialSpec is where the GMSA admission webhook - (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - GMSA credential spec named by the GMSACredentialSpecName field. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName - is the name of the GMSA credential - spec to use. - type: string - hostProcess: - description: |- - HostProcess determines if a container should be run as a 'Host Process' container. - All of a Pod's containers must have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). - In addition, if HostProcess is true then HostNetwork must also be set to true. - type: boolean - runAsUserName: - description: |- - The UserName in Windows to run the entrypoint of the container process. - Defaults to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: string - type: object - type: object - startupProbe: - description: |- - StartupProbe indicates that the Pod has successfully initialized. - If specified, no other probes are executed until this completes successfully. 
- If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. - This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, - when it might take a long time to load data or warm a cache, than during steady-state operation. - This cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies the action - to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action - involving a GRPC port. - properties: - port: - description: Port number of the gRPC - service. Number must be in the range - 1 to 65535. - format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http - request to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. 
- type: string - httpHeaders: - description: Custom headers to set - in the request. HTTP allows repeated - headers. - items: - description: HTTPHeader describes - a custom header to be used in - HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field - value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the - HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action - involving a TCP port. - properties: - host: - description: 'Optional: Host name - to connect to, defaults to the pod - IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. 
- Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - stdin: - description: |- - Whether this container should allocate a buffer for stdin in the container runtime. If this - is not set, reads from stdin in the container will always result in EOF. - Default is false. - type: boolean - stdinOnce: - description: |- - Whether the container runtime should close the stdin channel after it has been opened by - a single attach. When stdin is true the stdin stream will remain open across multiple attach - sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the - first client attaches to stdin, and then remains open and accepts data until the client disconnects, - at which time stdin is closed and remains closed until the container is restarted. 
If this - flag is false, a container processes that reads from stdin will never receive an EOF. - Default is false - type: boolean - terminationMessagePath: - description: |- - Optional: Path at which the file to which the container's termination message - will be written is mounted into the container's filesystem. - Message written is intended to be brief final status, such as an assertion failure message. - Will be truncated by the node if greater than 4096 bytes. The total message length across - all containers will be limited to 12kb. - Defaults to /dev/termination-log. - Cannot be updated. - type: string - terminationMessagePolicy: - description: |- - Indicate how the termination message should be populated. File will use the contents of - terminationMessagePath to populate the container status message on both success and failure. - FallbackToLogsOnError will use the last chunk of container log output if the termination - message file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, whichever is smaller. - Defaults to File. - Cannot be updated. - type: string - tty: - description: |- - Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. - Default is false. - type: boolean - volumeDevices: - description: volumeDevices is the list of - block devices to be used by the container. - items: - description: volumeDevice describes a mapping - of a raw block device within a container. - properties: - devicePath: - description: devicePath is the path - inside of the container that the device - will be mapped to. - type: string - name: - description: name must match the name - of a persistentVolumeClaim in the - pod - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - description: |- - Pod volumes to mount into the container's filesystem. - Cannot be updated. 
- items: - description: VolumeMount describes a mounting - of a Volume within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name - of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array - workingDir: - description: |- - Container's working directory. - If not specified, the container runtime's default will be used, which - might be configured in the container image. - Cannot be updated. - type: string - required: - - name - type: object - type: array - dnsConfig: - description: |- - Specifies the DNS parameters of a pod. - Parameters specified here will be merged to the generated DNS - configuration based on DNSPolicy. - properties: - nameservers: - description: |- - A list of DNS name server IP addresses. - This will be appended to the base nameservers generated from DNSPolicy. - Duplicated nameservers will be removed. 
- items: - type: string - type: array - options: - description: |- - A list of DNS resolver options. - This will be merged with the base options generated from DNSPolicy. - Duplicated entries will be removed. Resolution options given in Options - will override those that appear in the base DNSPolicy. - items: - description: PodDNSConfigOption defines DNS - resolver options of a pod. - properties: - name: - description: Required. - type: string - value: - type: string - type: object - type: array - searches: - description: |- - A list of DNS search domains for host-name lookup. - This will be appended to the base search paths generated from DNSPolicy. - Duplicated search paths will be removed. - items: - type: string - type: array - type: object - dnsPolicy: - description: |- - Set DNS policy for the pod. - Defaults to "ClusterFirst". - Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. - DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. - To have DNS options set along with hostNetwork, you have to specify DNS policy - explicitly to 'ClusterFirstWithHostNet'. - type: string - enableServiceLinks: - description: |- - EnableServiceLinks indicates whether information about services should be injected into pod's - environment variables, matching the syntax of Docker links. - Optional: Defaults to true. - type: boolean - ephemeralContainers: - description: |- - List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing - pod to perform user-initiated actions such as debugging. This list cannot be specified when - creating a pod, and it cannot be modified by updating the pod spec. In order to add an - ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. - items: - description: |- - An EphemeralContainer is a temporary container that you may add to an existing Pod for - user-initiated activities such as debugging. 
Ephemeral containers have no resource or - scheduling guarantees, and they will not be restarted when they exit or when a Pod is - removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the - Pod to exceed its resource allocation. - - - To add an ephemeral container, use the ephemeralcontainers subresource of an existing - Pod. Ephemeral containers may not be removed or restarted. - properties: - args: - description: |- - Arguments to the entrypoint. - The image's CMD is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot be updated. - More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - items: - type: string - type: array - command: - description: |- - Entrypoint array. Not executed within a shell. - The image's ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot be updated. - More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - items: - type: string - type: array - env: - description: |- - List of environment variables to set in the container. 
- Cannot be updated. - items: - description: EnvVar represents an environment - variable present in a Container. - properties: - name: - description: Name of the environment - variable. Must be a C_IDENTIFIER. - type: string - value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". - type: string - valueFrom: - description: Source for the environment - variable's value. Cannot be used if - value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a - ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether - the ConfigMap or its key must - be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - properties: - apiVersion: - description: Version of the - schema the FieldPath is written - in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field - to select in the specified - API version. 
- type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - properties: - containerName: - description: 'Container name: - required for volumes, optional - for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output - format of the exposed resources, - defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource - to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a - secret in the pod's namespace - properties: - key: - description: The key of the - secret to select from. Must - be a valid secret key. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether - the Secret or its key must - be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - envFrom: - description: |- - List of sources to populate environment variables in the container. - The keys defined within a source must be a C_IDENTIFIER. All invalid keys - will be reported as an event when the container is starting. When a key exists in multiple - sources, the value associated with the last source will take precedence. - Values defined by an Env with a duplicate key will take precedence. 
- Cannot be updated. - items: - description: EnvFromSource represents the - source of a set of ConfigMaps - properties: - configMapRef: - description: The ConfigMap to select - from - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the - ConfigMap must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - prefix: - description: An optional identifier - to prepend to each key in the ConfigMap. - Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the - Secret must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - type: object - type: array - image: - description: |- - Container image name. - More info: https://kubernetes.io/docs/concepts/containers/images - type: string - imagePullPolicy: - description: |- - Image pull policy. - One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/containers/images#updating-images - type: string - lifecycle: - description: Lifecycle is not allowed for - ephemeral containers. - properties: - postStart: - description: |- - PostStart is called immediately after a container is created. If the handler fails, - the container is terminated and restarted according to its restart policy. - Other management of the container blocks until the hook completes. 
- More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - properties: - exec: - description: Exec specifies the action - to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the - http request to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to - set in the request. HTTP allows - repeated headers. - items: - description: HTTPHeader describes - a custom header to be used - in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header - field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on - the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: |- - Deprecated. 
TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. - properties: - host: - description: 'Optional: Host name - to connect to, defaults to the - pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - description: |- - PreStop is called immediately before a container is terminated due to an - API request or management event such as liveness/startup probe failure, - preemption, resource contention, etc. The handler is not called if the - container crashes or exits. The Pod's termination grace period countdown begins before the - PreStop hook is executed. Regardless of the outcome of the handler, the - container will eventually terminate within the Pod's termination grace - period (unless delayed by finalizers). Other management of the container blocks until the hook completes - or until the termination grace period is reached. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - properties: - exec: - description: Exec specifies the action - to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
- items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the - http request to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to - set in the request. HTTP allows - repeated headers. - items: - description: HTTPHeader describes - a custom header to be used - in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header - field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on - the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: |- - Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. - properties: - host: - description: 'Optional: Host name - to connect to, defaults to the - pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - description: Probes are not allowed for ephemeral - containers. 
- properties: - exec: - description: Exec specifies the action - to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action - involving a GRPC port. - properties: - port: - description: Port number of the gRPC - service. Number must be in the range - 1 to 65535. - format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http - request to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set - in the request. HTTP allows repeated - headers. - items: - description: HTTPHeader describes - a custom header to be used in - HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. 
- type: string - value: - description: The header field - value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the - HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action - involving a TCP port. - properties: - host: - description: 'Optional: Host name - to connect to, defaults to the pod - IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
- The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - name: - description: |- - Name of the ephemeral container specified as a DNS_LABEL. - This name must be unique among all containers, init containers and ephemeral containers. - type: string - ports: - description: Ports are not allowed for ephemeral - containers. - items: - description: ContainerPort represents a - network port in a single container. - properties: - containerPort: - description: |- - Number of port to expose on the pod's IP address. - This must be a valid port number, 0 < x < 65536. - format: int32 - type: integer - hostIP: - description: What host IP to bind the - external port to. - type: string - hostPort: - description: |- - Number of port to expose on the host. - If specified, this must be a valid port number, 0 < x < 65536. - If HostNetwork is specified, this must match ContainerPort. - Most containers do not need this. - format: int32 - type: integer - name: - description: |- - If specified, this must be an IANA_SVC_NAME and unique within the pod. 
Each - named port in a pod must have a unique name. Name for the port that can be - referred to by services. - type: string - protocol: - default: TCP - description: |- - Protocol for port. Must be UDP, TCP, or SCTP. - Defaults to "TCP". - type: string - required: - - containerPort - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - description: Probes are not allowed for ephemeral - containers. - properties: - exec: - description: Exec specifies the action - to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action - involving a GRPC port. - properties: - port: - description: Port number of the gRPC - service. Number must be in the range - 1 to 65535. - format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http - request to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. 
You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set - in the request. HTTP allows repeated - headers. - items: - description: HTTPHeader describes - a custom header to be used in - HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field - value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the - HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action - involving a TCP port. - properties: - host: - description: 'Optional: Host name - to connect to, defaults to the pod - IP.' 
- type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - resizePolicy: - description: Resources resize policy for the - container. - items: - description: ContainerResizePolicy represents - resource resize policy for the container. - properties: - resourceName: - description: |- - Name of the resource to which this resource resize policy applies. - Supported values: cpu, memory. - type: string - restartPolicy: - description: |- - Restart policy to apply when specified resource is resized. - If not specified, it defaults to NotRequired. 
- type: string - required: - - resourceName - - restartPolicy - type: object - type: array - x-kubernetes-list-type: atomic - resources: - description: |- - Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources - already allocated to the pod. - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references - one entry in PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. 
- More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - restartPolicy: - description: |- - Restart policy for the container to manage the restart behavior of each - container within a pod. - This may only be set for init containers. You cannot set this field on - ephemeral containers. - type: string - securityContext: - description: |- - Optional: SecurityContext defines the security options the ephemeral container should be run with. - If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. - properties: - allowPrivilegeEscalation: - description: |- - AllowPrivilegeEscalation controls whether a process can gain more - privileges than its parent process. This bool directly controls if - the no_new_privs flag will be set on the container process. - AllowPrivilegeEscalation is true always when the container is: - 1) run as Privileged - 2) has CAP_SYS_ADMIN - Note that this field cannot be set when spec.os.name is windows. - type: boolean - capabilities: - description: |- - The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the container runtime. - Note that this field cannot be set when spec.os.name is windows. - properties: - add: - description: Added capabilities - items: - description: Capability represent - POSIX capabilities type - type: string - type: array - drop: - description: Removed capabilities - items: - description: Capability represent - POSIX capabilities type - type: string - type: array - type: object - privileged: - description: |- - Run container in privileged mode. - Processes in privileged containers are essentially equivalent to root on the host. - Defaults to false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - procMount: - description: |- - procMount denotes the type of proc mount to use for the containers. 
- The default is DefaultProcMount which uses the container runtime defaults for - readonly paths and masked paths. - This requires the ProcMountType feature flag to be enabled. - Note that this field cannot be set when spec.os.name is windows. - type: string - readOnlyRootFilesystem: - description: |- - Whether this container has a read-only root filesystem. - Default is false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - runAsGroup: - description: |- - The GID to run the entrypoint of the container process. - Uses runtime default if unset. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - runAsNonRoot: - description: |- - Indicates that the container must run as a non-root user. - If true, the Kubelet will validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start the container if it does. - If unset or false, no such validation will be performed. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: |- - The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - seLinuxOptions: - description: |- - The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random SELinux context for each - container. May also be set in PodSecurityContext. 
If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - properties: - level: - description: Level is SELinux level - label that applies to the container. - type: string - role: - description: Role is a SELinux role - label that applies to the container. - type: string - type: - description: Type is a SELinux type - label that applies to the container. - type: string - user: - description: User is a SELinux user - label that applies to the container. - type: string - type: object - seccompProfile: - description: |- - The seccomp options to use by this container. If seccomp options are - provided at both the pod & container level, the container options - override the pod options. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. - The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. - type: string - type: - description: |- - type indicates which kind of seccomp profile will be applied. - Valid options are: - - - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. - type: string - required: - - type - type: object - windowsOptions: - description: |- - The Windows specific settings applied to all containers. - If unspecified, the options from the PodSecurityContext will be used. - If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is linux. 
- properties: - gmsaCredentialSpec: - description: |- - GMSACredentialSpec is where the GMSA admission webhook - (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - GMSA credential spec named by the GMSACredentialSpecName field. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName - is the name of the GMSA credential - spec to use. - type: string - hostProcess: - description: |- - HostProcess determines if a container should be run as a 'Host Process' container. - All of a Pod's containers must have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). - In addition, if HostProcess is true then HostNetwork must also be set to true. - type: boolean - runAsUserName: - description: |- - The UserName in Windows to run the entrypoint of the container process. - Defaults to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: string - type: object - type: object - startupProbe: - description: Probes are not allowed for ephemeral - containers. - properties: - exec: - description: Exec specifies the action - to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. 
- format: int32 - type: integer - grpc: - description: GRPC specifies an action - involving a GRPC port. - properties: - port: - description: Port number of the gRPC - service. Number must be in the range - 1 to 65535. - format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http - request to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set - in the request. HTTP allows repeated - headers. - items: - description: HTTPHeader describes - a custom header to be used in - HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field - value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the - HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. 
- More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action - involving a TCP port. - properties: - host: - description: 'Optional: Host name - to connect to, defaults to the pod - IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. 
Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - stdin: - description: |- - Whether this container should allocate a buffer for stdin in the container runtime. If this - is not set, reads from stdin in the container will always result in EOF. - Default is false. - type: boolean - stdinOnce: - description: |- - Whether the container runtime should close the stdin channel after it has been opened by - a single attach. When stdin is true the stdin stream will remain open across multiple attach - sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the - first client attaches to stdin, and then remains open and accepts data until the client disconnects, - at which time stdin is closed and remains closed until the container is restarted. If this - flag is false, a container processes that reads from stdin will never receive an EOF. - Default is false - type: boolean - targetContainerName: - description: |- - If set, the name of the container from PodSpec that this ephemeral container targets. - The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. - If not set then the ephemeral container uses the namespaces configured in the Pod spec. - - - The container runtime must implement support for this feature. If the runtime does not - support namespace targeting then the result of setting this field is undefined. - type: string - terminationMessagePath: - description: |- - Optional: Path at which the file to which the container's termination message - will be written is mounted into the container's filesystem. - Message written is intended to be brief final status, such as an assertion failure message. - Will be truncated by the node if greater than 4096 bytes. The total message length across - all containers will be limited to 12kb. - Defaults to /dev/termination-log. - Cannot be updated. 
- type: string - terminationMessagePolicy: - description: |- - Indicate how the termination message should be populated. File will use the contents of - terminationMessagePath to populate the container status message on both success and failure. - FallbackToLogsOnError will use the last chunk of container log output if the termination - message file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, whichever is smaller. - Defaults to File. - Cannot be updated. - type: string - tty: - description: |- - Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. - Default is false. - type: boolean - volumeDevices: - description: volumeDevices is the list of - block devices to be used by the container. - items: - description: volumeDevice describes a mapping - of a raw block device within a container. - properties: - devicePath: - description: devicePath is the path - inside of the container that the device - will be mapped to. - type: string - name: - description: name must match the name - of a persistentVolumeClaim in the - pod - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - description: |- - Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. - Cannot be updated. - items: - description: VolumeMount describes a mounting - of a Volume within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name - of a Volume. 
- type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array - workingDir: - description: |- - Container's working directory. - If not specified, the container runtime's default will be used, which - might be configured in the container image. - Cannot be updated. - type: string - required: - - name - type: object - type: array - hostAliases: - description: |- - HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts - file if specified. This is only valid for non-hostNetwork pods. - items: - description: |- - HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the - pod's hosts file. - properties: - hostnames: - description: Hostnames for the above IP address. - items: - type: string - type: array - ip: - description: IP address of the host file entry. - type: string - type: object - type: array - hostIPC: - description: |- - Use the host's ipc namespace. - Optional: Default to false. - type: boolean - hostNetwork: - description: |- - Host networking requested for this pod. Use the host's network namespace. - If this option is set, the ports that will be used must be specified. - Default to false. - type: boolean - hostPID: - description: |- - Use the host's pid namespace. - Optional: Default to false. 
- type: boolean - hostUsers: - description: |- - Use the host's user namespace. - Optional: Default to true. - If set to true or not present, the pod will be run in the host user namespace, useful - for when the pod needs a feature only available to the host user namespace, such as - loading a kernel module with CAP_SYS_MODULE. - When set to false, a new userns is created for the pod. Setting false is useful for - mitigating container breakout vulnerabilities even allowing users to run their - containers as root without actually having root privileges on the host. - This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. - type: boolean - hostname: - description: |- - Specifies the hostname of the Pod - If not specified, the pod's hostname will be set to a system-defined value. - type: string - imagePullSecrets: - description: |- - ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. - If specified, these secrets will be passed to individual puller implementations for them to use. - More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod - items: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - type: array - initContainers: - description: |- - List of initialization containers belonging to the pod. - Init containers are executed in order prior to containers being started. If any - init container fails, the pod is considered to have failed and is handled according - to its restartPolicy. 
The name for an init container or normal container must be - unique among all containers. - Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. - The resourceRequirements of an init container are taken into account during scheduling - by finding the highest request/limit for each resource type, and then using the max of - of that value or the sum of the normal containers. Limits are applied to init containers - in a similar fashion. - Init containers cannot currently be added or removed. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - items: - description: A single application container that - you want to run within a pod. - properties: - args: - description: |- - Arguments to the entrypoint. - The container image's CMD is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot be updated. - More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - items: - type: string - type: array - command: - description: |- - Entrypoint array. Not executed within a shell. - The container image's ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot be updated. - More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - items: - type: string - type: array - env: - description: |- - List of environment variables to set in the container. - Cannot be updated. - items: - description: EnvVar represents an environment - variable present in a Container. - properties: - name: - description: Name of the environment - variable. Must be a C_IDENTIFIER. - type: string - value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". - type: string - valueFrom: - description: Source for the environment - variable's value. Cannot be used if - value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a - ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? 
- type: string - optional: - description: Specify whether - the ConfigMap or its key must - be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - properties: - apiVersion: - description: Version of the - schema the FieldPath is written - in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field - to select in the specified - API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - properties: - containerName: - description: 'Container name: - required for volumes, optional - for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output - format of the exposed resources, - defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource - to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a - secret in the pod's namespace - properties: - key: - description: The key of the - secret to select from. Must - be a valid secret key. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? 
- type: string - optional: - description: Specify whether - the Secret or its key must - be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - envFrom: - description: |- - List of sources to populate environment variables in the container. - The keys defined within a source must be a C_IDENTIFIER. All invalid keys - will be reported as an event when the container is starting. When a key exists in multiple - sources, the value associated with the last source will take precedence. - Values defined by an Env with a duplicate key will take precedence. - Cannot be updated. - items: - description: EnvFromSource represents the - source of a set of ConfigMaps - properties: - configMapRef: - description: The ConfigMap to select - from - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the - ConfigMap must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - prefix: - description: An optional identifier - to prepend to each key in the ConfigMap. - Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the - Secret must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - type: object - type: array - image: - description: |- - Container image name. 
- More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config management to default or override - container images in workload controllers like Deployments and StatefulSets. - type: string - imagePullPolicy: - description: |- - Image pull policy. - One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/containers/images#updating-images - type: string - lifecycle: - description: |- - Actions that the management system should take in response to container lifecycle events. - Cannot be updated. - properties: - postStart: - description: |- - PostStart is called immediately after a container is created. If the handler fails, - the container is terminated and restarted according to its restart policy. - Other management of the container blocks until the hook completes. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - properties: - exec: - description: Exec specifies the action - to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the - http request to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to - set in the request. HTTP allows - repeated headers. 
- items: - description: HTTPHeader describes - a custom header to be used - in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header - field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on - the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: |- - Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. - properties: - host: - description: 'Optional: Host name - to connect to, defaults to the - pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - description: |- - PreStop is called immediately before a container is terminated due to an - API request or management event such as liveness/startup probe failure, - preemption, resource contention, etc. The handler is not called if the - container crashes or exits. The Pod's termination grace period countdown begins before the - PreStop hook is executed. 
Regardless of the outcome of the handler, the - container will eventually terminate within the Pod's termination grace - period (unless delayed by finalizers). Other management of the container blocks until the hook completes - or until the termination grace period is reached. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - properties: - exec: - description: Exec specifies the action - to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the - http request to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to - set in the request. HTTP allows - repeated headers. - items: - description: HTTPHeader describes - a custom header to be used - in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header - field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on - the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. 
- x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: |- - Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. - properties: - host: - description: 'Optional: Host name - to connect to, defaults to the - pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - description: |- - Periodic probe of container liveness. - Container will be restarted if the probe fails. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies the action - to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action - involving a GRPC port. 
- properties: - port: - description: Port number of the gRPC - service. Number must be in the range - 1 to 65535. - format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http - request to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set - in the request. HTTP allows repeated - headers. - items: - description: HTTPHeader describes - a custom header to be used in - HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field - value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the - HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. 
- Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action - involving a TCP port. - properties: - host: - description: 'Optional: Host name - to connect to, defaults to the pod - IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. 
- More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - name: - description: |- - Name of the container specified as a DNS_LABEL. - Each container in a pod must have a unique name (DNS_LABEL). - Cannot be updated. - type: string - ports: - description: |- - List of ports to expose from the container. Not specifying a port here - DOES NOT prevent that port from being exposed. Any port which is - listening on the default "0.0.0.0" address inside a container will be - accessible from the network. - Modifying this array with strategic merge patch may corrupt the data. - For more information See https://github.com/kubernetes/kubernetes/issues/108255. - Cannot be updated. - items: - description: ContainerPort represents a - network port in a single container. - properties: - containerPort: - description: |- - Number of port to expose on the pod's IP address. - This must be a valid port number, 0 < x < 65536. - format: int32 - type: integer - hostIP: - description: What host IP to bind the - external port to. - type: string - hostPort: - description: |- - Number of port to expose on the host. - If specified, this must be a valid port number, 0 < x < 65536. - If HostNetwork is specified, this must match ContainerPort. - Most containers do not need this. - format: int32 - type: integer - name: - description: |- - If specified, this must be an IANA_SVC_NAME and unique within the pod. Each - named port in a pod must have a unique name. Name for the port that can be - referred to by services. - type: string - protocol: - default: TCP - description: |- - Protocol for port. Must be UDP, TCP, or SCTP. - Defaults to "TCP". - type: string - required: - - containerPort - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - description: |- - Periodic probe of container service readiness. 
- Container will be removed from service endpoints if the probe fails. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies the action - to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action - involving a GRPC port. - properties: - port: - description: Port number of the gRPC - service. Number must be in the range - 1 to 65535. - format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http - request to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set - in the request. HTTP allows repeated - headers. - items: - description: HTTPHeader describes - a custom header to be used in - HTTP probes - properties: - name: - description: |- - The header field name. 
- This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field - value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the - HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action - involving a TCP port. - properties: - host: - description: 'Optional: Host name - to connect to, defaults to the pod - IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
- The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - resizePolicy: - description: Resources resize policy for the - container. - items: - description: ContainerResizePolicy represents - resource resize policy for the container. - properties: - resourceName: - description: |- - Name of the resource to which this resource resize policy applies. - Supported values: cpu, memory. - type: string - restartPolicy: - description: |- - Restart policy to apply when specified resource is resized. - If not specified, it defaults to NotRequired. - type: string - required: - - resourceName - - restartPolicy - type: object - type: array - x-kubernetes-list-type: atomic - resources: - description: |- - Compute Resources required by this container. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. 
- - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references - one entry in PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - restartPolicy: - description: |- - RestartPolicy defines the restart behavior of individual containers in a pod. - This field may only be set for init containers, and the only allowed value is "Always". - For non-init containers or when this field is not specified, - the restart behavior is defined by the Pod's restart policy and the container type. 
- Setting the RestartPolicy as "Always" for the init container will have the following effect: - this init container will be continually restarted on - exit until all regular containers have terminated. Once all regular - containers have completed, all init containers with restartPolicy "Always" - will be shut down. This lifecycle differs from normal init containers and - is often referred to as a "sidecar" container. Although this init - container still starts in the init container sequence, it does not wait - for the container to complete before proceeding to the next init - container. Instead, the next init container starts immediately after this - init container is started, or after any startupProbe has successfully - completed. - type: string - securityContext: - description: |- - SecurityContext defines the security options the container should be run with. - If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. - More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - properties: - allowPrivilegeEscalation: - description: |- - AllowPrivilegeEscalation controls whether a process can gain more - privileges than its parent process. This bool directly controls if - the no_new_privs flag will be set on the container process. - AllowPrivilegeEscalation is true always when the container is: - 1) run as Privileged - 2) has CAP_SYS_ADMIN - Note that this field cannot be set when spec.os.name is windows. - type: boolean - capabilities: - description: |- - The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the container runtime. - Note that this field cannot be set when spec.os.name is windows. 
- properties: - add: - description: Added capabilities - items: - description: Capability represent - POSIX capabilities type - type: string - type: array - drop: - description: Removed capabilities - items: - description: Capability represent - POSIX capabilities type - type: string - type: array - type: object - privileged: - description: |- - Run container in privileged mode. - Processes in privileged containers are essentially equivalent to root on the host. - Defaults to false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - procMount: - description: |- - procMount denotes the type of proc mount to use for the containers. - The default is DefaultProcMount which uses the container runtime defaults for - readonly paths and masked paths. - This requires the ProcMountType feature flag to be enabled. - Note that this field cannot be set when spec.os.name is windows. - type: string - readOnlyRootFilesystem: - description: |- - Whether this container has a read-only root filesystem. - Default is false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - runAsGroup: - description: |- - The GID to run the entrypoint of the container process. - Uses runtime default if unset. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - runAsNonRoot: - description: |- - Indicates that the container must run as a non-root user. - If true, the Kubelet will validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start the container if it does. - If unset or false, no such validation will be performed. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. 
- type: boolean - runAsUser: - description: |- - The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - seLinuxOptions: - description: |- - The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random SELinux context for each - container. May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - properties: - level: - description: Level is SELinux level - label that applies to the container. - type: string - role: - description: Role is a SELinux role - label that applies to the container. - type: string - type: - description: Type is a SELinux type - label that applies to the container. - type: string - user: - description: User is a SELinux user - label that applies to the container. - type: string - type: object - seccompProfile: - description: |- - The seccomp options to use by this container. If seccomp options are - provided at both the pod & container level, the container options - override the pod options. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. - The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. - type: string - type: - description: |- - type indicates which kind of seccomp profile will be applied. 
- Valid options are: - - - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. - type: string - required: - - type - type: object - windowsOptions: - description: |- - The Windows specific settings applied to all containers. - If unspecified, the options from the PodSecurityContext will be used. - If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is linux. - properties: - gmsaCredentialSpec: - description: |- - GMSACredentialSpec is where the GMSA admission webhook - (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - GMSA credential spec named by the GMSACredentialSpecName field. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName - is the name of the GMSA credential - spec to use. - type: string - hostProcess: - description: |- - HostProcess determines if a container should be run as a 'Host Process' container. - All of a Pod's containers must have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). - In addition, if HostProcess is true then HostNetwork must also be set to true. - type: boolean - runAsUserName: - description: |- - The UserName in Windows to run the entrypoint of the container process. - Defaults to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: string - type: object - type: object - startupProbe: - description: |- - StartupProbe indicates that the Pod has successfully initialized. - If specified, no other probes are executed until this completes successfully. 
- If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. - This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, - when it might take a long time to load data or warm a cache, than during steady-state operation. - This cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies the action - to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action - involving a GRPC port. - properties: - port: - description: Port number of the gRPC - service. Number must be in the range - 1 to 65535. - format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http - request to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. 
- type: string - httpHeaders: - description: Custom headers to set - in the request. HTTP allows repeated - headers. - items: - description: HTTPHeader describes - a custom header to be used in - HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field - value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the - HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action - involving a TCP port. - properties: - host: - description: 'Optional: Host name - to connect to, defaults to the pod - IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. 
- Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - stdin: - description: |- - Whether this container should allocate a buffer for stdin in the container runtime. If this - is not set, reads from stdin in the container will always result in EOF. - Default is false. - type: boolean - stdinOnce: - description: |- - Whether the container runtime should close the stdin channel after it has been opened by - a single attach. When stdin is true the stdin stream will remain open across multiple attach - sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the - first client attaches to stdin, and then remains open and accepts data until the client disconnects, - at which time stdin is closed and remains closed until the container is restarted. 
If this - flag is false, a container processes that reads from stdin will never receive an EOF. - Default is false - type: boolean - terminationMessagePath: - description: |- - Optional: Path at which the file to which the container's termination message - will be written is mounted into the container's filesystem. - Message written is intended to be brief final status, such as an assertion failure message. - Will be truncated by the node if greater than 4096 bytes. The total message length across - all containers will be limited to 12kb. - Defaults to /dev/termination-log. - Cannot be updated. - type: string - terminationMessagePolicy: - description: |- - Indicate how the termination message should be populated. File will use the contents of - terminationMessagePath to populate the container status message on both success and failure. - FallbackToLogsOnError will use the last chunk of container log output if the termination - message file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, whichever is smaller. - Defaults to File. - Cannot be updated. - type: string - tty: - description: |- - Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. - Default is false. - type: boolean - volumeDevices: - description: volumeDevices is the list of - block devices to be used by the container. - items: - description: volumeDevice describes a mapping - of a raw block device within a container. - properties: - devicePath: - description: devicePath is the path - inside of the container that the device - will be mapped to. - type: string - name: - description: name must match the name - of a persistentVolumeClaim in the - pod - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - description: |- - Pod volumes to mount into the container's filesystem. - Cannot be updated. 
- items: - description: VolumeMount describes a mounting - of a Volume within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name - of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array - workingDir: - description: |- - Container's working directory. - If not specified, the container runtime's default will be used, which - might be configured in the container image. - Cannot be updated. - type: string - required: - - name - type: object - type: array - nodeName: - description: |- - NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - the scheduler simply schedules this pod onto that node, assuming that it fits resource - requirements. - type: string - nodeSelector: - additionalProperties: - type: string - description: |- - NodeSelector is a selector which must be true for the pod to fit on a node. 
- Selector which must match a node's labels for the pod to be scheduled on that node. - More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - type: object - x-kubernetes-map-type: atomic - os: - description: |- - Specifies the OS of the containers in the pod. - Some pod and container fields are restricted if this is set. - - - If the OS field is set to linux, the following fields must be unset: - -securityContext.windowsOptions - - - If the OS field is set to windows, following fields must be unset: - - spec.hostPID - - spec.hostIPC - - spec.hostUsers - - spec.securityContext.seLinuxOptions - - spec.securityContext.seccompProfile - - spec.securityContext.fsGroup - - spec.securityContext.fsGroupChangePolicy - - spec.securityContext.sysctls - - spec.shareProcessNamespace - - spec.securityContext.runAsUser - - spec.securityContext.runAsGroup - - spec.securityContext.supplementalGroups - - spec.containers[*].securityContext.seLinuxOptions - - spec.containers[*].securityContext.seccompProfile - - spec.containers[*].securityContext.capabilities - - spec.containers[*].securityContext.readOnlyRootFilesystem - - spec.containers[*].securityContext.privileged - - spec.containers[*].securityContext.allowPrivilegeEscalation - - spec.containers[*].securityContext.procMount - - spec.containers[*].securityContext.runAsUser - - spec.containers[*].securityContext.runAsGroup - properties: - name: - description: |- - Name is the name of the operating system. The currently supported values are linux and windows. 
- Additional value may be defined in future and can be one of: - https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration - Clients should expect to handle additional values and treat unrecognized values in this field as os: null - type: string - required: - - name - type: object - overhead: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. - This field will be autopopulated at admission time by the RuntimeClass admission controller. If - the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. - The RuntimeClass admission controller will reject Pod create requests which have the overhead already - set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value - defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. - More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md - type: object - preemptionPolicy: - description: |- - PreemptionPolicy is the Policy for preempting pods with lower priority. - One of Never, PreemptLowerPriority. - Defaults to PreemptLowerPriority if unset. - type: string - priority: - description: |- - The priority value. Various system components use this field to find the - priority of the pod. When Priority Admission Controller is enabled, it - prevents users from setting this field. The admission controller populates - this field from PriorityClassName. - The higher the value, the higher the priority. - format: int32 - type: integer - priorityClassName: - description: |- - If specified, indicates the pod's priority. 
"system-node-critical" and - "system-cluster-critical" are two special keywords which indicate the - highest priorities with the former being the highest priority. Any other - name must be defined by creating a PriorityClass object with that name. - If not specified, the pod priority will be default or zero if there is no - default. - type: string - readinessGates: - description: |- - If specified, all readiness gates will be evaluated for pod readiness. - A pod is ready when all its containers are ready AND - all conditions specified in the readiness gates have status equal to "True" - More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates - items: - description: PodReadinessGate contains the reference - to a pod condition - properties: - conditionType: - description: ConditionType refers to a condition - in the pod's condition list with matching - type. - type: string - required: - - conditionType - type: object - type: array - resourceClaims: - description: |- - ResourceClaims defines which ResourceClaims must be allocated - and reserved before the Pod is allowed to start. The resources - will be made available to those containers which consume them - by name. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. - items: - description: |- - PodResourceClaim references exactly one ResourceClaim through a ClaimSource. - It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. - Containers that need access to the ResourceClaim reference it with this name. - properties: - name: - description: |- - Name uniquely identifies this resource claim inside the pod. - This must be a DNS_LABEL. - type: string - source: - description: Source describes where to find - the ResourceClaim. - properties: - resourceClaimName: - description: |- - ResourceClaimName is the name of a ResourceClaim object in the same - namespace as this pod. 
- type: string - resourceClaimTemplateName: - description: |- - ResourceClaimTemplateName is the name of a ResourceClaimTemplate - object in the same namespace as this pod. - - - The template will be used to create a new ResourceClaim, which will - be bound to this pod. When this pod is deleted, the ResourceClaim - will also be deleted. The pod name and resource name, along with a - generated component, will be used to form a unique name for the - ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. - - - This field is immutable and no changes will be made to the - corresponding ResourceClaim by the control plane after creating the - ResourceClaim. - type: string - type: object - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - restartPolicy: - description: |- - Restart policy for all containers within the pod. - One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. - Default to Always. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy - type: string - runtimeClassName: - description: |- - RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used - to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. - If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an - empty definition that uses the default runtime handler. - More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class - type: string - schedulerName: - description: |- - If specified, the pod will be dispatched by specified scheduler. - If not specified, the pod will be dispatched by default scheduler. - type: string - schedulingGates: - description: |- - SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
- If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the - scheduler will not attempt to schedule the pod. - - - SchedulingGates can only be set at pod creation time, and be removed only afterwards. - - - This is a beta feature enabled by the PodSchedulingReadiness feature gate. - items: - description: PodSchedulingGate is associated to - a Pod to guard its scheduling. - properties: - name: - description: |- - Name of the scheduling gate. - Each scheduling gate must have a unique name field. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - securityContext: - description: |- - SecurityContext holds pod-level security attributes and common container settings. - Optional: Defaults to empty. See type description for default values of each field. - properties: - fsGroup: - description: |- - A special supplemental group that applies to all containers in a pod. - Some volume types allow the Kubelet to change the ownership of that volume - to be owned by the pod: - - - 1. The owning GID will be the FSGroup - 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) - 3. The permission bits are OR'd with rw-rw---- - - - If unset, the Kubelet will not modify the ownership and permissions of any volume. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - fsGroupChangePolicy: - description: |- - fsGroupChangePolicy defines behavior of changing ownership and permission of the volume - before being exposed inside Pod. This field will only apply to - volume types which support fsGroup based ownership(and permissions). - It will have no effect on ephemeral volume types such as: secret, configmaps - and emptydir. - Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. - Note that this field cannot be set when spec.os.name is windows. 
- type: string - runAsGroup: - description: |- - The GID to run the entrypoint of the container process. - Uses runtime default if unset. - May also be set in SecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence - for that container. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - runAsNonRoot: - description: |- - Indicates that the container must run as a non-root user. - If true, the Kubelet will validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start the container if it does. - If unset or false, no such validation will be performed. - May also be set in SecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: |- - The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. - May also be set in SecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence - for that container. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - seLinuxOptions: - description: |- - The SELinux context to be applied to all containers. - If unspecified, the container runtime will allocate a random SELinux context for each - container. May also be set in SecurityContext. If set in - both SecurityContext and PodSecurityContext, the value specified in SecurityContext - takes precedence for that container. - Note that this field cannot be set when spec.os.name is windows. - properties: - level: - description: Level is SELinux level label - that applies to the container. - type: string - role: - description: Role is a SELinux role label - that applies to the container. 
- type: string - type: - description: Type is a SELinux type label - that applies to the container. - type: string - user: - description: User is a SELinux user label - that applies to the container. - type: string - type: object - seccompProfile: - description: |- - The seccomp options to use by the containers in this pod. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. - The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. - type: string - type: - description: |- - type indicates which kind of seccomp profile will be applied. - Valid options are: - - - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. - type: string - required: - - type - type: object - supplementalGroups: - description: |- - A list of groups applied to the first process run in each container, in addition - to the container's primary GID, the fsGroup (if specified), and group memberships - defined in the container image for the uid of the container process. If unspecified, - no additional groups are added to any container. Note that group memberships - defined in the container image for the uid of the container process are still effective, - even if they are not included in this list. - Note that this field cannot be set when spec.os.name is windows. - items: - format: int64 - type: integer - type: array - sysctls: - description: |- - Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported - sysctls (by the container runtime) might fail to launch. - Note that this field cannot be set when spec.os.name is windows. 
- items: - description: Sysctl defines a kernel parameter - to be set - properties: - name: - description: Name of a property to set - type: string - value: - description: Value of a property to set - type: string - required: - - name - - value - type: object - type: array - windowsOptions: - description: |- - The Windows specific settings applied to all containers. - If unspecified, the options within a container's SecurityContext will be used. - If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is linux. - properties: - gmsaCredentialSpec: - description: |- - GMSACredentialSpec is where the GMSA admission webhook - (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - GMSA credential spec named by the GMSACredentialSpecName field. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the - name of the GMSA credential spec to use. - type: string - hostProcess: - description: |- - HostProcess determines if a container should be run as a 'Host Process' container. - All of a Pod's containers must have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). - In addition, if HostProcess is true then HostNetwork must also be set to true. - type: boolean - runAsUserName: - description: |- - The UserName in Windows to run the entrypoint of the container process. - Defaults to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: string - type: object - type: object - serviceAccount: - description: |- - DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. - Deprecated: Use serviceAccountName instead. 
- type: string - serviceAccountName: - description: |- - ServiceAccountName is the name of the ServiceAccount to use to run this pod. - More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - type: string - setHostnameAsFQDN: - description: |- - If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). - In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). - In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. - If a pod does not have FQDN, this has no effect. - Default to false. - type: boolean - shareProcessNamespace: - description: |- - Share a single process namespace between all of the containers in a pod. - When this is set containers will be able to view and signal processes from other containers - in the same pod, and the first process in each container will not be assigned PID 1. - HostPID and ShareProcessNamespace cannot both be set. - Optional: Default to false. - type: boolean - subdomain: - description: |- - If specified, the fully qualified Pod hostname will be "...svc.". - If not specified, the pod will not have a domainname at all. - type: string - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - If this value is nil, the default grace period will be used instead. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. 
- Defaults to 30 seconds. - format: int64 - type: integer - tolerations: - description: If specified, the pod's tolerations. - items: - description: |- - The pod this Toleration is attached to tolerates any taint that matches - the triple using the matching operator . - properties: - effect: - description: |- - Effect indicates the taint effect to match. Empty means match all taint effects. - When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: |- - Key is the taint key that the toleration applies to. Empty means match all taint keys. - If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: |- - Operator represents a key's relationship to the value. - Valid operators are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, so that a pod can - tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: |- - TolerationSeconds represents the period of time the toleration (which must be - of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, - it is not set, which means tolerate the taint forever (do not evict). Zero and - negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: |- - Value is the taint value the toleration matches to. - If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologySpreadConstraints: - description: |- - TopologySpreadConstraints describes how a group of pods ought to spread across topology - domains. Scheduler will schedule pods in a way which abides by the constraints. - All topologySpreadConstraints are ANDed. - items: - description: TopologySpreadConstraint specifies - how to spread matching pods among the given - topology. 
- properties: - labelSelector: - description: |- - LabelSelector is used to find matching pods. - Pods that match this label selector are counted to determine the number of pods - in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select the pods over which - spreading will be calculated. The keys are used to lookup values from the - incoming pod labels, those key-value labels are ANDed with labelSelector - to select the group of existing pods over which spreading will be calculated - for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. 
- MatchLabelKeys cannot be set when LabelSelector isn't set. - Keys that don't exist in the incoming pod labels will - be ignored. A null or empty list means only match against labelSelector. - - - This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - maxSkew: - description: |- - MaxSkew describes the degree to which pods may be unevenly distributed. - When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference - between the number of matching pods in the target topology and the global minimum. - The global minimum is the minimum number of matching pods in an eligible domain - or zero if the number of eligible domains is less than MinDomains. - For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - labelSelector spread as 2/2/1: - In this case, the global minimum is 1. - | zone1 | zone2 | zone3 | - | P P | P P | P | - - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; - scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) - violate MaxSkew(1). - - if MaxSkew is 2, incoming pod can be scheduled onto any zone. - When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence - to topologies that satisfy it. - It's a required field. Default value is 1 and 0 is not allowed. - format: int32 - type: integer - minDomains: - description: |- - MinDomains indicates a minimum number of eligible domains. - When the number of eligible domains with matching topology keys is less than minDomains, - Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. - And when the number of eligible domains with matching topology keys equals or greater than minDomains, - this value has no effect on scheduling. 
- As a result, when the number of eligible domains is less than minDomains, - scheduler won't schedule more than maxSkew Pods to those domains. - If value is nil, the constraint behaves as if MinDomains is equal to 1. - Valid values are integers greater than 0. - When value is not nil, WhenUnsatisfiable must be DoNotSchedule. - - - For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same - labelSelector spread as 2/2/2: - | zone1 | zone2 | zone3 | - | P P | P P | P P | - The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. - In this situation, new pod with the same labelSelector cannot be scheduled, - because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, - it will violate MaxSkew. - - - This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). - format: int32 - type: integer - nodeAffinityPolicy: - description: |- - NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector - when calculating pod topology spread skew. Options are: - - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. - - - If this value is nil, the behavior is equivalent to the Honor policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. - type: string - nodeTaintsPolicy: - description: |- - NodeTaintsPolicy indicates how we will treat node taints when calculating - pod topology spread skew. Options are: - - Honor: nodes without taints, along with tainted nodes for which the incoming pod - has a toleration, are included. - - Ignore: node taints are ignored. All nodes are included. - - - If this value is nil, the behavior is equivalent to the Ignore policy. 
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. - type: string - topologyKey: - description: |- - TopologyKey is the key of node labels. Nodes that have a label with this key - and identical values are considered to be in the same topology. - We consider each as a "bucket", and try to put balanced number - of pods into each bucket. - We define a domain as a particular instance of a topology. - Also, we define an eligible domain as a domain whose nodes meet the requirements of - nodeAffinityPolicy and nodeTaintsPolicy. - e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. - And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. - It's a required field. - type: string - whenUnsatisfiable: - description: |- - WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy - the spread constraint. - - DoNotSchedule (default) tells the scheduler not to schedule it. - - ScheduleAnyway tells the scheduler to schedule the pod in any location, - but giving higher precedence to topologies that would help reduce the - skew. - A constraint is considered "Unsatisfiable" for an incoming pod - if and only if every possible node assignment for that pod would violate - "MaxSkew" on some topology. - For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - labelSelector spread as 3/1/1: - | zone1 | zone2 | zone3 | - | P P P | P | P | - If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled - to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies - MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler - won't make it *more* imbalanced. - It's a required field. 
- type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - x-kubernetes-list-map-keys: - - topologyKey - - whenUnsatisfiable - x-kubernetes-list-type: map - volumes: - description: |- - List of volumes that can be mounted by containers belonging to the pod. - More info: https://kubernetes.io/docs/concepts/storage/volumes - items: - description: Volume represents a named volume - in a pod that may be accessed by any container - in the pod. - properties: - awsElasticBlockStore: - description: |- - awsElasticBlockStore represents an AWS Disk resource that is attached to a - kubelet's host machine and then exposed to the pod. - More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - properties: - fsType: - description: |- - fsType is the filesystem type of the volume that you want to mount. - Tip: Ensure that the filesystem type is supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - TODO: how do we prevent errors in the filesystem from compromising the machine - type: string - partition: - description: |- - partition is the partition in the volume that you want to mount. - If omitted, the default is to mount by volume name. - Examples: For volume /dev/sda1, you specify the partition as "1". - Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - format: int32 - type: integer - readOnly: - description: |- - readOnly value true will force the readOnly setting in VolumeMounts. - More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - type: boolean - volumeID: - description: |- - volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
- More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - type: string - required: - - volumeID - type: object - azureDisk: - description: azureDisk represents an Azure - Data Disk mount on the host and bind mount - to the pod. - properties: - cachingMode: - description: 'cachingMode is the Host - Caching mode: None, Read Only, Read - Write.' - type: string - diskName: - description: diskName is the Name of the - data disk in the blob storage - type: string - diskURI: - description: diskURI is the URI of data - disk in the blob storage - type: string - fsType: - description: |- - fsType is Filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - kind: - description: 'kind expected values are - Shared: multiple blob disks per storage - account Dedicated: single blob disk - per storage account Managed: azure - managed data disk (only in managed availability - set). defaults to shared' - type: string - readOnly: - description: |- - readOnly Defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - required: - - diskName - - diskURI - type: object - azureFile: - description: azureFile represents an Azure - File Service mount on the host and bind - mount to the pod. - properties: - readOnly: - description: |- - readOnly defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. 
- type: boolean - secretName: - description: secretName is the name of - secret that contains Azure Storage Account - Name and Key - type: string - shareName: - description: shareName is the azure share - Name - type: string - required: - - secretName - - shareName - type: object - cephfs: - description: cephFS represents a Ceph FS mount - on the host that shares a pod's lifetime - properties: - monitors: - description: |- - monitors is Required: Monitors is a collection of Ceph monitors - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - items: - type: string - type: array - path: - description: 'path is Optional: Used as - the mounted root, rather than the full - Ceph tree, default is /' - type: string - readOnly: - description: |- - readOnly is Optional: Defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - type: boolean - secretFile: - description: |- - secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - type: string - secretRef: - description: |- - secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? 
- type: string - type: object - x-kubernetes-map-type: atomic - user: - description: |- - user is optional: User is the rados user name, default is admin - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - type: string - required: - - monitors - type: object - cinder: - description: |- - cinder represents a cinder volume attached and mounted on kubelets host machine. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md - properties: - fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md - type: string - readOnly: - description: |- - readOnly defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md - type: boolean - secretRef: - description: |- - secretRef is optional: points to a secret object containing parameters used to connect - to OpenStack. - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - volumeID: - description: |- - volumeID used to identify the volume in cinder. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md - type: string - required: - - volumeID - type: object - configMap: - description: configMap represents a configMap - that should populate this volume - properties: - defaultMode: - description: |- - defaultMode is optional: mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
- YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - Defaults to 0644. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - ConfigMap will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the ConfigMap, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a - path within a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? 
- type: string - optional: - description: optional specify whether - the ConfigMap or its keys must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - csi: - description: csi (Container Storage Interface) - represents ephemeral storage that is handled - by certain external CSI drivers (Beta feature). - properties: - driver: - description: |- - driver is the name of the CSI driver that handles this volume. - Consult with your admin for the correct name as registered in the cluster. - type: string - fsType: - description: |- - fsType to mount. Ex. "ext4", "xfs", "ntfs". - If not provided, the empty value is passed to the associated CSI driver - which will determine the default filesystem to apply. - type: string - nodePublishSecretRef: - description: |- - nodePublishSecretRef is a reference to the secret object containing - sensitive information to pass to the CSI driver to complete the CSI - NodePublishVolume and NodeUnpublishVolume calls. - This field is optional, and may be empty if no secret is required. If the - secret object contains more than one secret, all secret references are passed. - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - readOnly: - description: |- - readOnly specifies a read-only configuration for the volume. - Defaults to false (read/write). - type: boolean - volumeAttributes: - additionalProperties: - type: string - description: |- - volumeAttributes stores driver-specific properties that are passed to the CSI - driver. Consult your driver's documentation for supported values. 
- type: object - required: - - driver - type: object - downwardAPI: - description: downwardAPI represents downward - API about the pod that should populate this - volume - properties: - defaultMode: - description: |- - Optional: mode bits to use on created files by default. Must be a - Optional: mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - Defaults to 0644. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - items: - description: Items is a list of downward - API volume file - items: - description: DownwardAPIVolumeFile represents - information to create the file containing - the pod field - properties: - fieldRef: - description: 'Required: Selects - a field of the pod: only annotations, - labels, name and namespace are - supported.' - properties: - apiVersion: - description: Version of the - schema the FieldPath is written - in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field - to select in the specified - API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - mode: - description: |- - Optional: mode bits used to set permissions on this file, must be an octal value - between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. 
- format: int32 - type: integer - path: - description: 'Required: Path is the - relative path name of the file - to be created. Must not be absolute - or contain the ''..'' path. Must - be utf-8 encoded. The first item - of the relative path must not - start with ''..''' - type: string - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - properties: - containerName: - description: 'Container name: - required for volumes, optional - for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output - format of the exposed resources, - defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource - to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - required: - - path - type: object - type: array - type: object - emptyDir: - description: |- - emptyDir represents a temporary directory that shares a pod's lifetime. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir - properties: - medium: - description: |- - medium represents what type of storage medium should back this directory. - The default is "" which means to use the node's default medium. - Must be an empty string (default) or Memory. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir - type: string - sizeLimit: - anyOf: - - type: integer - - type: string - description: |- - sizeLimit is the total amount of local storage required for this EmptyDir volume. - The size limit is also applicable for memory medium. 
- The maximum usage on memory medium EmptyDir would be the minimum value between - the SizeLimit specified here and the sum of memory limits of all containers in a pod. - The default is nil which means that the limit is undefined. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - ephemeral: - description: |- - ephemeral represents a volume that is handled by a cluster storage driver. - The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, - and deleted when the pod is removed. - - - Use this if: - a) the volume is only needed while the pod runs, - b) features of normal volumes like restoring from snapshot or capacity - tracking are needed, - c) the storage driver is specified through a storage class, and - d) the storage driver supports dynamic volume provisioning through - a PersistentVolumeClaim (see EphemeralVolumeSource for more - information on the connection between this volume type - and PersistentVolumeClaim). - - - Use PersistentVolumeClaim or one of the vendor-specific - APIs for volumes that persist for longer than the lifecycle - of an individual pod. - - - Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to - be used that way - see the documentation of the driver for - more information. - - - A pod can use both types of ephemeral volumes and - persistent volumes at the same time. - properties: - volumeClaimTemplate: - description: |- - Will be used to create a stand-alone PVC to provision the volume. - The pod in which this EphemeralVolumeSource is embedded will be the - owner of the PVC, i.e. the PVC will be deleted together with the - pod. The name of the PVC will be `-` where - `` is the name from the `PodSpec.Volumes` array - entry. 
Pod validation will reject the pod if the concatenated name - is not valid for a PVC (for example, too long). - - - An existing PVC with that name that is not owned by the pod - will *not* be used for the pod to avoid using an unrelated - volume by mistake. Starting the pod is then blocked until - the unrelated PVC is removed. If such a pre-created PVC is - meant to be used by the pod, the PVC has to updated with an - owner reference to the pod once the pod exists. Normally - this should not be necessary, but it may be useful when - manually reconstructing a broken cluster. - - - This field is read-only and no changes will be made by Kubernetes - to the PVC after it has been created. - - - Required, must not be nil. - properties: - metadata: - description: |- - May contain labels and annotations that will be copied into the PVC - when creating it. No other fields are allowed and will be rejected during - validation. - type: object - spec: - description: |- - The specification for the PersistentVolumeClaim. The entire content is - copied unchanged into the PVC that gets created from this - template. The same fields as in a PersistentVolumeClaim - are also valid here. - properties: - accessModes: - description: |- - accessModes contains the desired access modes the volume should have. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 - items: - type: string - type: array - dataSource: - description: |- - dataSource field can be used to specify either: - * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) - If the provisioner or an external controller can support the specified data source, - it will create a new volume based on the contents of the specified data source. 
- When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, - and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. - If the namespace is specified, then dataSourceRef will not be copied to dataSource. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type - of resource being referenced - type: string - name: - description: Name is the name - of resource being referenced - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - dataSourceRef: - description: |- - dataSourceRef specifies the object from which to populate the volume with data, if a non-empty - volume is desired. This may be any object from a non-empty API group (non - core object) or a PersistentVolumeClaim object. - When this field is specified, volume binding will only succeed if the type of - the specified object matches some installed volume populator or dynamic - provisioner. - This field will replace the functionality of the dataSource field and as such - if both fields are non-empty, they must have the same value. For backwards - compatibility, when namespace isn't specified in dataSourceRef, - both fields (dataSource and dataSourceRef) will be set to the same - value automatically if one of them is empty and the other is non-empty. - When namespace is specified in dataSourceRef, - dataSource isn't set to the same value and must be empty. - There are three important differences between dataSource and dataSourceRef: - * While dataSource only allows two specific types of objects, dataSourceRef - allows any non-core object, as well as PersistentVolumeClaim objects. 
- * While dataSource ignores disallowed values (dropping them), dataSourceRef - preserves all values, and generates an error if a disallowed value is - specified. - * While dataSource only allows local objects, dataSourceRef allows objects - in any namespaces. - (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. - (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type - of resource being referenced - type: string - name: - description: Name is the name - of resource being referenced - type: string - namespace: - description: |- - Namespace is the namespace of resource being referenced - Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. - (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - type: string - required: - - kind - - name - type: object - resources: - description: |- - resources represents the minimum resources the volume should have. - If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements - that are lower than previous value but must still be higher than capacity recorded in the - status field of the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. 
- - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim - references one entry in - PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - selector: - description: selector is a label - query over volumes to consider - for binding. - properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. 
- items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is - the label key that - the selector applies - to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - storageClassName: - description: |- - storageClassName is the name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 - type: string - volumeMode: - description: |- - volumeMode defines what type of volume is required by the claim. - Value of Filesystem is implied when not included in claim spec. - type: string - volumeName: - description: volumeName is the - binding reference to the PersistentVolume - backing this claim. - type: string - type: object - required: - - spec - type: object - type: object - fc: - description: fc represents a Fibre Channel - resource that is attached to a kubelet's - host machine and then exposed to the pod. - properties: - fsType: - description: |- - fsType is the filesystem type to mount. 
- Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - TODO: how do we prevent errors in the filesystem from compromising the machine - type: string - lun: - description: 'lun is Optional: FC target - lun number' - format: int32 - type: integer - readOnly: - description: |- - readOnly is Optional: Defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - targetWWNs: - description: 'targetWWNs is Optional: - FC target worldwide names (WWNs)' - items: - type: string - type: array - wwids: - description: |- - wwids Optional: FC volume world wide identifiers (wwids) - Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. - items: - type: string - type: array - type: object - flexVolume: - description: |- - flexVolume represents a generic volume resource that is - provisioned/attached using an exec based plugin. - properties: - driver: - description: driver is the name of the - driver to use for this volume. - type: string - fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. - type: string - options: - additionalProperties: - type: string - description: 'options is Optional: this - field holds extra command options if - any.' - type: object - readOnly: - description: |- - readOnly is Optional: defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: |- - secretRef is Optional: secretRef is reference to the secret object containing - sensitive information to pass to the plugin scripts. This may be - empty if no secret object is specified. If the secret object - contains more than one secret, all secrets are passed to the plugin - scripts. 
- properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - required: - - driver - type: object - flocker: - description: flocker represents a Flocker - volume attached to a kubelet's host machine. - This depends on the Flocker control service - being running - properties: - datasetName: - description: |- - datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker - should be considered as deprecated - type: string - datasetUUID: - description: datasetUUID is the UUID of - the dataset. This is unique identifier - of a Flocker dataset - type: string - type: object - gcePersistentDisk: - description: |- - gcePersistentDisk represents a GCE Disk resource that is attached to a - kubelet's host machine and then exposed to the pod. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - properties: - fsType: - description: |- - fsType is filesystem type of the volume that you want to mount. - Tip: Ensure that the filesystem type is supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - TODO: how do we prevent errors in the filesystem from compromising the machine - type: string - partition: - description: |- - partition is the partition in the volume that you want to mount. - If omitted, the default is to mount by volume name. - Examples: For volume /dev/sda1, you specify the partition as "1". - Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). 
- More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - format: int32 - type: integer - pdName: - description: |- - pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - type: string - readOnly: - description: |- - readOnly here will force the ReadOnly setting in VolumeMounts. - Defaults to false. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - type: boolean - required: - - pdName - type: object - gitRepo: - description: |- - gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an - EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir - into the Pod's container. - properties: - directory: - description: |- - directory is the target directory name. - Must not contain or start with '..'. If '.' is supplied, the volume directory will be the - git repository. Otherwise, if specified, the volume will contain the git repository in - the subdirectory with the given name. - type: string - repository: - description: repository is the URL - type: string - revision: - description: revision is the commit hash - for the specified revision. - type: string - required: - - repository - type: object - glusterfs: - description: |- - glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md - properties: - endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - type: string - path: - description: |- - path is the Glusterfs volume path. 
- More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - type: string - readOnly: - description: |- - readOnly here will force the Glusterfs volume to be mounted with read-only permissions. - Defaults to false. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - type: boolean - required: - - endpoints - - path - type: object - hostPath: - description: |- - hostPath represents a pre-existing file or directory on the host - machine that is directly exposed to the container. This is generally - used for system agents or other privileged things that are allowed - to see the host machine. Most containers will NOT need this. - More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - --- - TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not - mount host directories as read/write. - properties: - path: - description: |- - path of the directory on the host. - If the path is a symlink, it will follow the link to the real path. - More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - type: string - type: - description: |- - type for HostPath Volume - Defaults to "" - More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - type: string - required: - - path - type: object - iscsi: - description: |- - iscsi represents an ISCSI Disk resource that is attached to a - kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md - properties: - chapAuthDiscovery: - description: chapAuthDiscovery defines - whether support iSCSI Discovery CHAP - authentication - type: boolean - chapAuthSession: - description: chapAuthSession defines whether - support iSCSI Session CHAP authentication - type: boolean - fsType: - description: |- - fsType is the filesystem type of the volume that you want to mount. - Tip: Ensure that the filesystem type is supported by the host operating system. 
- Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi - TODO: how do we prevent errors in the filesystem from compromising the machine - type: string - initiatorName: - description: |- - initiatorName is the custom iSCSI Initiator Name. - If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface - : will be created for the connection. - type: string - iqn: - description: iqn is the target iSCSI Qualified - Name. - type: string - iscsiInterface: - description: |- - iscsiInterface is the interface Name that uses an iSCSI transport. - Defaults to 'default' (tcp). - type: string - lun: - description: lun represents iSCSI Target - Lun number. - format: int32 - type: integer - portals: - description: |- - portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port - is other than default (typically TCP ports 860 and 3260). - items: - type: string - type: array - readOnly: - description: |- - readOnly here will force the ReadOnly setting in VolumeMounts. - Defaults to false. - type: boolean - secretRef: - description: secretRef is the CHAP Secret - for iSCSI target and initiator authentication - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - targetPortal: - description: |- - targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port - is other than default (typically TCP ports 860 and 3260). - type: string - required: - - iqn - - lun - - targetPortal - type: object - name: - description: |- - name of the volume. - Must be a DNS_LABEL and unique within the pod. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - nfs: - description: |- - nfs represents an NFS mount on the host that shares a pod's lifetime - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - properties: - path: - description: |- - path that is exported by the NFS server. - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - type: string - readOnly: - description: |- - readOnly here will force the NFS export to be mounted with read-only permissions. - Defaults to false. - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - type: boolean - server: - description: |- - server is the hostname or IP address of the NFS server. - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - type: string - required: - - path - - server - type: object - persistentVolumeClaim: - description: |- - persistentVolumeClaimVolumeSource represents a reference to a - PersistentVolumeClaim in the same namespace. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims - properties: - claimName: - description: |- - claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims - type: string - readOnly: - description: |- - readOnly Will force the ReadOnly setting in VolumeMounts. - Default false. - type: boolean - required: - - claimName - type: object - photonPersistentDisk: - description: photonPersistentDisk represents - a PhotonController persistent disk attached - and mounted on kubelets host machine - properties: - fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
- type: string - pdID: - description: pdID is the ID that identifies - Photon Controller persistent disk - type: string - required: - - pdID - type: object - portworxVolume: - description: portworxVolume represents a portworx - volume attached and mounted on kubelets - host machine - properties: - fsType: - description: |- - fSType represents the filesystem type to mount - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. - type: string - readOnly: - description: |- - readOnly defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - volumeID: - description: volumeID uniquely identifies - a Portworx volume - type: string - required: - - volumeID - type: object - projected: - description: projected items for all in one - resources secrets, configmaps, and downward - API - properties: - defaultMode: - description: |- - defaultMode are the mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - sources: - description: sources is the list of volume - projections - items: - description: Projection that may be - projected along with other supported - volume types - properties: - configMap: - description: configMap information - about the configMap data to project - properties: - items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - ConfigMap will be projected into the volume as a file whose name is the - key and content is the value. 
If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the ConfigMap, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string - key to a path within a volume. - properties: - key: - description: key is the - key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? 
- type: string - optional: - description: optional specify - whether the ConfigMap or its - keys must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - downwardAPI: - description: downwardAPI information - about the downwardAPI data to - project - properties: - items: - description: Items is a list - of DownwardAPIVolume file - items: - description: DownwardAPIVolumeFile - represents information to - create the file containing - the pod field - properties: - fieldRef: - description: 'Required: - Selects a field of the - pod: only annotations, - labels, name and namespace - are supported.' - properties: - apiVersion: - description: Version - of the schema the - FieldPath is written - in terms of, defaults - to "v1". - type: string - fieldPath: - description: Path - of the field to - select in the specified - API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - mode: - description: |- - Optional: mode bits used to set permissions on this file, must be an octal value - between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: 'Required: - Path is the relative - path name of the file - to be created. Must - not be absolute or contain - the ''..'' path. Must - be utf-8 encoded. The - first item of the relative - path must not start - with ''..''' - type: string - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
- properties: - containerName: - description: 'Container - name: required for - volumes, optional - for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies - the output format - of the exposed resources, - defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: - resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - required: - - path - type: object - type: array - type: object - secret: - description: secret information - about the secret data to project - properties: - items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - Secret will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the Secret, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string - key to a path within a volume. - properties: - key: - description: key is the - key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. 
- May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: optional field - specify whether the Secret - or its key must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - serviceAccountToken: - description: serviceAccountToken - is information about the serviceAccountToken - data to project - properties: - audience: - description: |- - audience is the intended audience of the token. A recipient of a token - must identify itself with an identifier specified in the audience of the - token, and otherwise should reject the token. The audience defaults to the - identifier of the apiserver. - type: string - expirationSeconds: - description: |- - expirationSeconds is the requested duration of validity of the service - account token. As the token approaches expiration, the kubelet volume - plugin will proactively rotate the service account token. The kubelet will - start trying to rotate the token if the token is older than 80 percent of - its time to live or if the token is older than 24 hours.Defaults to 1 hour - and must be at least 10 minutes. - format: int64 - type: integer - path: - description: |- - path is the path relative to the mount point of the file to project the - token into. 
- type: string - required: - - path - type: object - type: object - type: array - type: object - quobyte: - description: quobyte represents a Quobyte - mount on the host that shares a pod's lifetime - properties: - group: - description: |- - group to map volume access to - Default is no group - type: string - readOnly: - description: |- - readOnly here will force the Quobyte volume to be mounted with read-only permissions. - Defaults to false. - type: boolean - registry: - description: |- - registry represents a single or multiple Quobyte Registry services - specified as a string as host:port pair (multiple entries are separated with commas) - which acts as the central registry for volumes - type: string - tenant: - description: |- - tenant owning the given Quobyte volume in the Backend - Used with dynamically provisioned Quobyte volumes, value is set by the plugin - type: string - user: - description: |- - user to map volume access to - Defaults to serivceaccount user - type: string - volume: - description: volume is a string that references - an already created Quobyte volume by - name. - type: string - required: - - registry - - volume - type: object - rbd: - description: |- - rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md - properties: - fsType: - description: |- - fsType is the filesystem type of the volume that you want to mount. - Tip: Ensure that the filesystem type is supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd - TODO: how do we prevent errors in the filesystem from compromising the machine - type: string - image: - description: |- - image is the rados image name. 
- More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - type: string - keyring: - description: |- - keyring is the path to key ring for RBDUser. - Default is /etc/ceph/keyring. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - type: string - monitors: - description: |- - monitors is a collection of Ceph monitors. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - items: - type: string - type: array - pool: - description: |- - pool is the rados pool name. - Default is rbd. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - type: string - readOnly: - description: |- - readOnly here will force the ReadOnly setting in VolumeMounts. - Defaults to false. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - type: boolean - secretRef: - description: |- - secretRef is name of the authentication secret for RBDUser. If provided - overrides keyring. - Default is nil. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - user: - description: |- - user is the rados user name. - Default is admin. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - type: string - required: - - image - - monitors - type: object - scaleIO: - description: scaleIO represents a ScaleIO - persistent volume attached and mounted on - Kubernetes nodes. - properties: - fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". - Default is "xfs". - type: string - gateway: - description: gateway is the host address - of the ScaleIO API Gateway. 
- type: string - protectionDomain: - description: protectionDomain is the name - of the ScaleIO Protection Domain for - the configured storage. - type: string - readOnly: - description: |- - readOnly Defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: |- - secretRef references to the secret for ScaleIO user and other - sensitive information. If this is not provided, Login operation will fail. - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - sslEnabled: - description: sslEnabled Flag enable/disable - SSL communication with Gateway, default - false - type: boolean - storageMode: - description: |- - storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. - Default is ThinProvisioned. - type: string - storagePool: - description: storagePool is the ScaleIO - Storage Pool associated with the protection - domain. - type: string - system: - description: system is the name of the - storage system as configured in ScaleIO. - type: string - volumeName: - description: |- - volumeName is the name of a volume already created in the ScaleIO system - that is associated with this volume source. - type: string - required: - - gateway - - secretRef - - system - type: object - secret: - description: |- - secret represents a secret that should populate this volume. - More info: https://kubernetes.io/docs/concepts/storage/volumes#secret - properties: - defaultMode: - description: |- - defaultMode is Optional: mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
- YAML accepts both octal and decimal values, JSON requires decimal values - for mode bits. Defaults to 0644. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - items: - description: |- - items If unspecified, each key-value pair in the Data field of the referenced - Secret will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the Secret, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a - path within a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - optional: - description: optional field specify whether - the Secret or its keys must be defined - type: boolean - secretName: - description: |- - secretName is the name of the secret in the pod's namespace to use. 
- More info: https://kubernetes.io/docs/concepts/storage/volumes#secret - type: string - type: object - storageos: - description: storageOS represents a StorageOS - volume attached and mounted on Kubernetes - nodes. - properties: - fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - readOnly: - description: |- - readOnly defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: |- - secretRef specifies the secret to use for obtaining the StorageOS API - credentials. If not specified, default values will be attempted. - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - volumeName: - description: |- - volumeName is the human-readable name of the StorageOS volume. Volume - names are only unique within a namespace. - type: string - volumeNamespace: - description: |- - volumeNamespace specifies the scope of the volume within StorageOS. If no - namespace is specified then the Pod's namespace will be used. This allows the - Kubernetes name scoping to be mirrored within StorageOS for tighter integration. - Set VolumeName to any name to override the default behaviour. - Set to "default" if you are not using namespaces within StorageOS. - Namespaces that do not pre-exist within StorageOS will be created. - type: string - type: object - vsphereVolume: - description: vsphereVolume represents a vSphere - volume attached and mounted on kubelets - host machine - properties: - fsType: - description: |- - fsType is filesystem type to mount. 
- Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - storagePolicyID: - description: storagePolicyID is the storage - Policy Based Management (SPBM) profile - ID associated with the StoragePolicyName. - type: string - storagePolicyName: - description: storagePolicyName is the - storage Policy Based Management (SPBM) - profile name. - type: string - volumePath: - description: volumePath is the path that - identifies vSphere volume vmdk - type: string - required: - - volumePath - type: object - required: - - name - type: object - type: array - required: - - containers - type: object - type: object - type: object - description: |- - MPIReplicaSpecs contains maps from `MPIReplicaType` to `ReplicaSpec` that - specify the MPI replicas to run. - type: object - runPolicy: - description: RunPolicy encapsulates various runtime policies of - the job. - properties: - activeDeadlineSeconds: - description: |- - Specifies the duration in seconds relative to the startTime that the job may be active - before the system tries to terminate it; value must be positive integer. - format: int64 - type: integer - backoffLimit: - description: Optional number of retries before marking this - job failed. - format: int32 - type: integer - cleanPodPolicy: - description: |- - CleanPodPolicy defines the policy to kill pods after the job completes. - Default to Running. - type: string - schedulingPolicy: - description: SchedulingPolicy defines the policy related to - scheduling, e.g. gang-scheduling - properties: - minAvailable: - description: |- - MinAvailable defines the minimal number of member to run the PodGroup. - If the gang-scheduling is set to the volcano, - input is passed to `.spec.mimMember` in PodGroup for the volcano. - When using this field, you need to make sure the application supports resizing (e.g., Elastic Horovod). 
- - - If not set, it defaults to the number of workers. - format: int32 - type: integer - minResources: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - MinResources defines the minimal resources of members to run the PodGroup. - If the gang-scheduling is set to the volcano, - input is passed to `.spec.mimResources` in PodGroup for volcano. - type: object - priorityClass: - description: |- - PriorityClass defines the PodGroup's PriorityClass. - If the gang-scheduling is set to the volcano, - input is passed to `.spec.priorityClassName` in PodGroup for volcano. - type: string - queue: - description: |- - Queue defines the queue name to allocate resource for PodGroup. - If the gang-scheduling is set to the volcano, - input is passed to `.spec.queue` in PodGroup for the volcano. - type: string - scheduleTimeoutSeconds: - description: |- - SchedulerTimeoutSeconds defines the maximal time of members to wait before run the PodGroup. - Currently, this parameter isn't respected in any case. - TODO (tenzen-y): Modify comments when supporting scheduler-plugins. - format: int32 - type: integer - type: object - suspend: - default: false - description: |- - suspend specifies whether the MPIJob controller should create Pods or not. - If a MPIJob is created with suspend set to true, no Pods are created by - the MPIJob controller. If a MPIJob is suspended after creation (i.e. the - flag goes from false to true), the MPIJob controller will delete all - active Pods and PodGroups associated with this MPIJob. Also, it will suspend the - Launcher Job. Users must design their workload to gracefully handle this. - Suspending a Job will reset the StartTime field of the MPIJob. - - - Defaults to false. 
- type: boolean - ttlSecondsAfterFinished: - description: |- - TTLSecondsAfterFinished is the TTL to clean up jobs. - It may take extra ReconcilePeriod seconds for the cleanup, since - reconcile gets called periodically. - Default to infinite. - format: int32 - type: integer - type: object - slotsPerWorker: - default: 1 - description: |- - Specifies the number of slots per worker used in hostfile. - Defaults to 1. - format: int32 - type: integer - sshAuthMountPath: - default: /root/.ssh - description: |- - SSHAuthMountPath is the directory where SSH keys are mounted. - Defaults to "/root/.ssh". - type: string - required: - - mpiReplicaSpecs - type: object - numPorts: - description: |- - Number of ports to open for communication with the user container. These ports are opened on - the targeted NNF nodes and can be accessed outside of the k8s cluster (e.g. compute nodes). - The requested ports are made available as environment variables inside the container and in - the DWS workflow (NNF_CONTAINER_PORTS). - format: int32 - type: integer - pinned: - default: false - description: Pinned is true if this instance is an immutable copy - type: boolean - postRunTimeoutSeconds: - default: 300 - description: |- - Containers are expected to complete in the PostRun State. Allow this many seconds for the - containers to exit before declaring an error the workflow. - Defaults to 300 if not set. A value of 0 disables this behavior. - format: int64 - minimum: 0 - type: integer - preRunTimeoutSeconds: - default: 300 - description: |- - Containers are launched in the PreRun state. Allow this many seconds for the containers to - start before declaring an error to the workflow. - Defaults to 300 if not set. A value of 0 disables this behavior. - format: int64 - minimum: 0 - type: integer - retryLimit: - default: 6 - description: |- - Specifies the number of times a container will be retried upon a failure. A new pod is - deployed on each retry. 
Defaults to 6 by kubernetes itself and must be set. A value of 0 - disables retries. - format: int32 - minimum: 0 - type: integer - spec: - description: |- - Spec to define the containers created from this profile. This is used for non-MPI containers. - Refer to the K8s documentation for `PodSpec` for more definition: - https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec - Either this or MPISpec must be provided, but not both. - properties: - activeDeadlineSeconds: - description: |- - Optional duration in seconds the pod may be active on the node relative to - StartTime before the system will actively try to mark it failed and kill associated containers. - Value must be a positive integer. - format: int64 - type: integer - affinity: - description: If specified, the pod's scheduling constraints - properties: - nodeAffinity: - description: Describes node affinity scheduling rules for - the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node matches the corresponding matchExpressions; the - node(s) with the highest sum are the most preferred. - items: - description: |- - An empty preferred scheduling term matches all objects with implicit weight 0 - (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with - the corresponding weight. 
- properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - x-kubernetes-map-type: atomic - weight: - description: Weight associated with matching the - corresponding nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to an update), the system - may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. - The terms are ORed. - items: - description: |- - A null or empty node selector term matches no objects. The requirements of - them are ANDed. - The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - x-kubernetes-map-type: atomic - type: array - required: - - nodeSelectorTerms - type: object - x-kubernetes-map-type: atomic - type: object - podAffinity: - description: Describes pod affinity scheduling rules (e.g. - co-locate this pod in the same node, zone, etc. as some - other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. 
- for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred - node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". 
The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. 
- The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. 
- properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. 
- properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling rules - (e.g. 
avoid putting this pod in the same node, zone, etc. - as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the anti-affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred - node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. 
This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the anti-affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the anti-affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. 
- items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. 
- The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
- items: - type: string - type: array - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - automountServiceAccountToken: - description: AutomountServiceAccountToken indicates whether a - service account token should be automatically mounted. - type: boolean - containers: - description: |- - List of containers belonging to the pod. - Containers cannot currently be added or removed. - There must be at least one container in a Pod. - Cannot be updated. - items: - description: A single application container that you want to - run within a pod. - properties: - args: - description: |- - Arguments to the entrypoint. - The container image's CMD is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot be updated. - More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - items: - type: string - type: array - command: - description: |- - Entrypoint array. Not executed within a shell. - The container image's ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable - cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot be updated. - More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - items: - type: string - type: array - env: - description: |- - List of environment variables to set in the container. - Cannot be updated. - items: - description: EnvVar represents an environment variable - present in a Container. - properties: - name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. - type: string - value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". - type: string - valueFrom: - description: Source for the environment variable's - value. Cannot be used if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? 
- type: string - optional: - description: Specify whether the ConfigMap - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in - the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - properties: - containerName: - description: 'Container name: required for - volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of - the exposed resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in the - pod's namespace - properties: - key: - description: The key of the secret to select - from. Must be a valid secret key. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? 
- type: string - optional: - description: Specify whether the Secret or - its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - envFrom: - description: |- - List of sources to populate environment variables in the container. - The keys defined within a source must be a C_IDENTIFIER. All invalid keys - will be reported as an event when the container is starting. When a key exists in multiple - sources, the value associated with the last source will take precedence. - Values defined by an Env with a duplicate key will take precedence. - Cannot be updated. - items: - description: EnvFromSource represents the source of a - set of ConfigMaps - properties: - configMapRef: - description: The ConfigMap to select from - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the ConfigMap must - be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - prefix: - description: An optional identifier to prepend to - each key in the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the Secret must be - defined - type: boolean - type: object - x-kubernetes-map-type: atomic - type: object - type: array - image: - description: |- - Container image name. 
- More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config management to default or override - container images in workload controllers like Deployments and StatefulSets. - type: string - imagePullPolicy: - description: |- - Image pull policy. - One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/containers/images#updating-images - type: string - lifecycle: - description: |- - Actions that the management system should take in response to container lifecycle events. - Cannot be updated. - properties: - postStart: - description: |- - PostStart is called immediately after a container is created. If the handler fails, - the container is terminated and restarted according to its restart policy. - Other management of the container blocks until the hook completes. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. 
- items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: |- - Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - description: |- - PreStop is called immediately before a container is terminated due to an - API request or management event such as liveness/startup probe failure, - preemption, resource contention, etc. The handler is not called if the - container crashes or exits. The Pod's termination grace period countdown begins before the - PreStop hook is executed. 
Regardless of the outcome of the handler, the - container will eventually terminate within the Pod's termination grace - period (unless delayed by finalizers). Other management of the container blocks until the hook completes - or until the termination grace period is reached. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. 
- x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: |- - Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - description: |- - Periodic probe of container liveness. - Container will be restarted if the probe fails. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action involving a GRPC - port. 
- properties: - port: - description: Port number of the gRPC service. Number - must be in the range 1 to 65535. - format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. 
Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect to, - defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - name: - description: |- - Name of the container specified as a DNS_LABEL. 
- Each container in a pod must have a unique name (DNS_LABEL). - Cannot be updated. - type: string - ports: - description: |- - List of ports to expose from the container. Not specifying a port here - DOES NOT prevent that port from being exposed. Any port which is - listening on the default "0.0.0.0" address inside a container will be - accessible from the network. - Modifying this array with strategic merge patch may corrupt the data. - For more information See https://github.com/kubernetes/kubernetes/issues/108255. - Cannot be updated. - items: - description: ContainerPort represents a network port in - a single container. - properties: - containerPort: - description: |- - Number of port to expose on the pod's IP address. - This must be a valid port number, 0 < x < 65536. - format: int32 - type: integer - hostIP: - description: What host IP to bind the external port - to. - type: string - hostPort: - description: |- - Number of port to expose on the host. - If specified, this must be a valid port number, 0 < x < 65536. - If HostNetwork is specified, this must match ContainerPort. - Most containers do not need this. - format: int32 - type: integer - name: - description: |- - If specified, this must be an IANA_SVC_NAME and unique within the pod. Each - named port in a pod must have a unique name. Name for the port that can be - referred to by services. - type: string - protocol: - default: TCP - description: |- - Protocol for port. Must be UDP, TCP, or SCTP. - Defaults to "TCP". - type: string - required: - - containerPort - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - description: |- - Periodic probe of container service readiness. - Container will be removed from service endpoints if the probe fails. - Cannot be updated. 
- More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action involving a GRPC - port. - properties: - port: - description: Port number of the gRPC service. Number - must be in the range 1 to 65535. - format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. 
- type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect to, - defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
- The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - resizePolicy: - description: Resources resize policy for the container. - items: - description: ContainerResizePolicy represents resource - resize policy for the container. - properties: - resourceName: - description: |- - Name of the resource to which this resource resize policy applies. - Supported values: cpu, memory. - type: string - restartPolicy: - description: |- - Restart policy to apply when specified resource is resized. - If not specified, it defaults to NotRequired. - type: string - required: - - resourceName - - restartPolicy - type: object - type: array - x-kubernetes-list-type: atomic - resources: - description: |- - Compute Resources required by this container. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. 
- - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry in - PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - restartPolicy: - description: |- - RestartPolicy defines the restart behavior of individual containers in a pod. - This field may only be set for init containers, and the only allowed value is "Always". - For non-init containers or when this field is not specified, - the restart behavior is defined by the Pod's restart policy and the container type. 
- Setting the RestartPolicy as "Always" for the init container will have the following effect: - this init container will be continually restarted on - exit until all regular containers have terminated. Once all regular - containers have completed, all init containers with restartPolicy "Always" - will be shut down. This lifecycle differs from normal init containers and - is often referred to as a "sidecar" container. Although this init - container still starts in the init container sequence, it does not wait - for the container to complete before proceeding to the next init - container. Instead, the next init container starts immediately after this - init container is started, or after any startupProbe has successfully - completed. - type: string - securityContext: - description: |- - SecurityContext defines the security options the container should be run with. - If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. - More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - properties: - allowPrivilegeEscalation: - description: |- - AllowPrivilegeEscalation controls whether a process can gain more - privileges than its parent process. This bool directly controls if - the no_new_privs flag will be set on the container process. - AllowPrivilegeEscalation is true always when the container is: - 1) run as Privileged - 2) has CAP_SYS_ADMIN - Note that this field cannot be set when spec.os.name is windows. - type: boolean - capabilities: - description: |- - The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the container runtime. - Note that this field cannot be set when spec.os.name is windows. 
- properties: - add: - description: Added capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - drop: - description: Removed capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - type: object - privileged: - description: |- - Run container in privileged mode. - Processes in privileged containers are essentially equivalent to root on the host. - Defaults to false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - procMount: - description: |- - procMount denotes the type of proc mount to use for the containers. - The default is DefaultProcMount which uses the container runtime defaults for - readonly paths and masked paths. - This requires the ProcMountType feature flag to be enabled. - Note that this field cannot be set when spec.os.name is windows. - type: string - readOnlyRootFilesystem: - description: |- - Whether this container has a read-only root filesystem. - Default is false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - runAsGroup: - description: |- - The GID to run the entrypoint of the container process. - Uses runtime default if unset. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - runAsNonRoot: - description: |- - Indicates that the container must run as a non-root user. - If true, the Kubelet will validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start the container if it does. - If unset or false, no such validation will be performed. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. 
- type: boolean - runAsUser: - description: |- - The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - seLinuxOptions: - description: |- - The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random SELinux context for each - container. May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - properties: - level: - description: Level is SELinux level label that applies - to the container. - type: string - role: - description: Role is a SELinux role label that applies - to the container. - type: string - type: - description: Type is a SELinux type label that applies - to the container. - type: string - user: - description: User is a SELinux user label that applies - to the container. - type: string - type: object - seccompProfile: - description: |- - The seccomp options to use by this container. If seccomp options are - provided at both the pod & container level, the container options - override the pod options. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. - The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. - type: string - type: - description: |- - type indicates which kind of seccomp profile will be applied. 
- Valid options are: - - - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. - type: string - required: - - type - type: object - windowsOptions: - description: |- - The Windows specific settings applied to all containers. - If unspecified, the options from the PodSecurityContext will be used. - If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is linux. - properties: - gmsaCredentialSpec: - description: |- - GMSACredentialSpec is where the GMSA admission webhook - (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - GMSA credential spec named by the GMSACredentialSpecName field. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name - of the GMSA credential spec to use. - type: string - hostProcess: - description: |- - HostProcess determines if a container should be run as a 'Host Process' container. - All of a Pod's containers must have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). - In addition, if HostProcess is true then HostNetwork must also be set to true. - type: boolean - runAsUserName: - description: |- - The UserName in Windows to run the entrypoint of the container process. - Defaults to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: string - type: object - type: object - startupProbe: - description: |- - StartupProbe indicates that the Pod has successfully initialized. - If specified, no other probes are executed until this completes successfully. 
- If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. - This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, - when it might take a long time to load data or warm a cache, than during steady-state operation. - This cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action involving a GRPC - port. - properties: - port: - description: Port number of the gRPC service. Number - must be in the range 1 to 65535. - format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. 
- type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect to, - defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. 
- Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - stdin: - description: |- - Whether this container should allocate a buffer for stdin in the container runtime. If this - is not set, reads from stdin in the container will always result in EOF. - Default is false. - type: boolean - stdinOnce: - description: |- - Whether the container runtime should close the stdin channel after it has been opened by - a single attach. When stdin is true the stdin stream will remain open across multiple attach - sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the - first client attaches to stdin, and then remains open and accepts data until the client disconnects, - at which time stdin is closed and remains closed until the container is restarted. 
If this - flag is false, a container processes that reads from stdin will never receive an EOF. - Default is false - type: boolean - terminationMessagePath: - description: |- - Optional: Path at which the file to which the container's termination message - will be written is mounted into the container's filesystem. - Message written is intended to be brief final status, such as an assertion failure message. - Will be truncated by the node if greater than 4096 bytes. The total message length across - all containers will be limited to 12kb. - Defaults to /dev/termination-log. - Cannot be updated. - type: string - terminationMessagePolicy: - description: |- - Indicate how the termination message should be populated. File will use the contents of - terminationMessagePath to populate the container status message on both success and failure. - FallbackToLogsOnError will use the last chunk of container log output if the termination - message file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, whichever is smaller. - Defaults to File. - Cannot be updated. - type: string - tty: - description: |- - Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. - Default is false. - type: boolean - volumeDevices: - description: volumeDevices is the list of block devices - to be used by the container. - items: - description: volumeDevice describes a mapping of a raw - block device within a container. - properties: - devicePath: - description: devicePath is the path inside of the - container that the device will be mapped to. - type: string - name: - description: name must match the name of a persistentVolumeClaim - in the pod - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - description: |- - Pod volumes to mount into the container's filesystem. - Cannot be updated. 
- items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array - workingDir: - description: |- - Container's working directory. - If not specified, the container runtime's default will be used, which - might be configured in the container image. - Cannot be updated. - type: string - required: - - name - type: object - type: array - dnsConfig: - description: |- - Specifies the DNS parameters of a pod. - Parameters specified here will be merged to the generated DNS - configuration based on DNSPolicy. - properties: - nameservers: - description: |- - A list of DNS name server IP addresses. - This will be appended to the base nameservers generated from DNSPolicy. - Duplicated nameservers will be removed. 
- items: - type: string - type: array - options: - description: |- - A list of DNS resolver options. - This will be merged with the base options generated from DNSPolicy. - Duplicated entries will be removed. Resolution options given in Options - will override those that appear in the base DNSPolicy. - items: - description: PodDNSConfigOption defines DNS resolver options - of a pod. - properties: - name: - description: Required. - type: string - value: - type: string - type: object - type: array - searches: - description: |- - A list of DNS search domains for host-name lookup. - This will be appended to the base search paths generated from DNSPolicy. - Duplicated search paths will be removed. - items: - type: string - type: array - type: object - dnsPolicy: - description: |- - Set DNS policy for the pod. - Defaults to "ClusterFirst". - Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. - DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. - To have DNS options set along with hostNetwork, you have to specify DNS policy - explicitly to 'ClusterFirstWithHostNet'. - type: string - enableServiceLinks: - description: |- - EnableServiceLinks indicates whether information about services should be injected into pod's - environment variables, matching the syntax of Docker links. - Optional: Defaults to true. - type: boolean - ephemeralContainers: - description: |- - List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing - pod to perform user-initiated actions such as debugging. This list cannot be specified when - creating a pod, and it cannot be modified by updating the pod spec. In order to add an - ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. - items: - description: |- - An EphemeralContainer is a temporary container that you may add to an existing Pod for - user-initiated activities such as debugging. 
Ephemeral containers have no resource or - scheduling guarantees, and they will not be restarted when they exit or when a Pod is - removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the - Pod to exceed its resource allocation. - - - To add an ephemeral container, use the ephemeralcontainers subresource of an existing - Pod. Ephemeral containers may not be removed or restarted. - properties: - args: - description: |- - Arguments to the entrypoint. - The image's CMD is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot be updated. - More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - items: - type: string - type: array - command: - description: |- - Entrypoint array. Not executed within a shell. - The image's ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot be updated. - More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - items: - type: string - type: array - env: - description: |- - List of environment variables to set in the container. 
- Cannot be updated. - items: - description: EnvVar represents an environment variable - present in a Container. - properties: - name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. - type: string - value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". - type: string - valueFrom: - description: Source for the environment variable's - value. Cannot be used if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the ConfigMap - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in - the specified API version. 
- type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - properties: - containerName: - description: 'Container name: required for - volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of - the exposed resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in the - pod's namespace - properties: - key: - description: The key of the secret to select - from. Must be a valid secret key. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the Secret or - its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - envFrom: - description: |- - List of sources to populate environment variables in the container. - The keys defined within a source must be a C_IDENTIFIER. All invalid keys - will be reported as an event when the container is starting. When a key exists in multiple - sources, the value associated with the last source will take precedence. - Values defined by an Env with a duplicate key will take precedence. - Cannot be updated. 
- items: - description: EnvFromSource represents the source of a - set of ConfigMaps - properties: - configMapRef: - description: The ConfigMap to select from - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the ConfigMap must - be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - prefix: - description: An optional identifier to prepend to - each key in the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the Secret must be - defined - type: boolean - type: object - x-kubernetes-map-type: atomic - type: object - type: array - image: - description: |- - Container image name. - More info: https://kubernetes.io/docs/concepts/containers/images - type: string - imagePullPolicy: - description: |- - Image pull policy. - One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/containers/images#updating-images - type: string - lifecycle: - description: Lifecycle is not allowed for ephemeral containers. - properties: - postStart: - description: |- - PostStart is called immediately after a container is created. If the handler fails, - the container is terminated and restarted according to its restart policy. - Other management of the container blocks until the hook completes. 
- More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: |- - Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. 
There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - description: |- - PreStop is called immediately before a container is terminated due to an - API request or management event such as liveness/startup probe failure, - preemption, resource contention, etc. The handler is not called if the - container crashes or exits. The Pod's termination grace period countdown begins before the - PreStop hook is executed. Regardless of the outcome of the handler, the - container will eventually terminate within the Pod's termination grace - period (unless delayed by finalizers). Other management of the container blocks until the hook completes - or until the termination grace period is reached. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. 
- properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: |- - Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - description: Probes are not allowed for ephemeral containers. - properties: - exec: - description: Exec specifies the action to take. 
- properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action involving a GRPC - port. - properties: - port: - description: Port number of the gRPC service. Number - must be in the range 1 to 65535. - format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. 
- type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect to, - defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
- The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - name: - description: |- - Name of the ephemeral container specified as a DNS_LABEL. - This name must be unique among all containers, init containers and ephemeral containers. - type: string - ports: - description: Ports are not allowed for ephemeral containers. - items: - description: ContainerPort represents a network port in - a single container. - properties: - containerPort: - description: |- - Number of port to expose on the pod's IP address. - This must be a valid port number, 0 < x < 65536. - format: int32 - type: integer - hostIP: - description: What host IP to bind the external port - to. - type: string - hostPort: - description: |- - Number of port to expose on the host. - If specified, this must be a valid port number, 0 < x < 65536. - If HostNetwork is specified, this must match ContainerPort. - Most containers do not need this. - format: int32 - type: integer - name: - description: |- - If specified, this must be an IANA_SVC_NAME and unique within the pod. 
Each - named port in a pod must have a unique name. Name for the port that can be - referred to by services. - type: string - protocol: - default: TCP - description: |- - Protocol for port. Must be UDP, TCP, or SCTP. - Defaults to "TCP". - type: string - required: - - containerPort - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - description: Probes are not allowed for ephemeral containers. - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action involving a GRPC - port. - properties: - port: - description: Port number of the gRPC service. Number - must be in the range 1 to 65535. - format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. 
- type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect to, - defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. 
- Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - resizePolicy: - description: Resources resize policy for the container. - items: - description: ContainerResizePolicy represents resource - resize policy for the container. - properties: - resourceName: - description: |- - Name of the resource to which this resource resize policy applies. - Supported values: cpu, memory. - type: string - restartPolicy: - description: |- - Restart policy to apply when specified resource is resized. - If not specified, it defaults to NotRequired. - type: string - required: - - resourceName - - restartPolicy - type: object - type: array - x-kubernetes-list-type: atomic - resources: - description: |- - Resources are not allowed for ephemeral containers. 
Ephemeral containers use spare resources - already allocated to the pod. - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry in - PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - restartPolicy: - description: |- - Restart policy for the container to manage the restart behavior of each - container within a pod. 
- This may only be set for init containers. You cannot set this field on - ephemeral containers. - type: string - securityContext: - description: |- - Optional: SecurityContext defines the security options the ephemeral container should be run with. - If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. - properties: - allowPrivilegeEscalation: - description: |- - AllowPrivilegeEscalation controls whether a process can gain more - privileges than its parent process. This bool directly controls if - the no_new_privs flag will be set on the container process. - AllowPrivilegeEscalation is true always when the container is: - 1) run as Privileged - 2) has CAP_SYS_ADMIN - Note that this field cannot be set when spec.os.name is windows. - type: boolean - capabilities: - description: |- - The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the container runtime. - Note that this field cannot be set when spec.os.name is windows. - properties: - add: - description: Added capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - drop: - description: Removed capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - type: object - privileged: - description: |- - Run container in privileged mode. - Processes in privileged containers are essentially equivalent to root on the host. - Defaults to false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - procMount: - description: |- - procMount denotes the type of proc mount to use for the containers. - The default is DefaultProcMount which uses the container runtime defaults for - readonly paths and masked paths. - This requires the ProcMountType feature flag to be enabled. - Note that this field cannot be set when spec.os.name is windows. 
- type: string - readOnlyRootFilesystem: - description: |- - Whether this container has a read-only root filesystem. - Default is false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - runAsGroup: - description: |- - The GID to run the entrypoint of the container process. - Uses runtime default if unset. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - runAsNonRoot: - description: |- - Indicates that the container must run as a non-root user. - If true, the Kubelet will validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start the container if it does. - If unset or false, no such validation will be performed. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: |- - The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - seLinuxOptions: - description: |- - The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random SELinux context for each - container. May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - properties: - level: - description: Level is SELinux level label that applies - to the container. 
- type: string - role: - description: Role is a SELinux role label that applies - to the container. - type: string - type: - description: Type is a SELinux type label that applies - to the container. - type: string - user: - description: User is a SELinux user label that applies - to the container. - type: string - type: object - seccompProfile: - description: |- - The seccomp options to use by this container. If seccomp options are - provided at both the pod & container level, the container options - override the pod options. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. - The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. - type: string - type: - description: |- - type indicates which kind of seccomp profile will be applied. - Valid options are: - - - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. - type: string - required: - - type - type: object - windowsOptions: - description: |- - The Windows specific settings applied to all containers. - If unspecified, the options from the PodSecurityContext will be used. - If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is linux. - properties: - gmsaCredentialSpec: - description: |- - GMSACredentialSpec is where the GMSA admission webhook - (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - GMSA credential spec named by the GMSACredentialSpecName field. 
- type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name - of the GMSA credential spec to use. - type: string - hostProcess: - description: |- - HostProcess determines if a container should be run as a 'Host Process' container. - All of a Pod's containers must have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). - In addition, if HostProcess is true then HostNetwork must also be set to true. - type: boolean - runAsUserName: - description: |- - The UserName in Windows to run the entrypoint of the container process. - Defaults to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: string - type: object - type: object - startupProbe: - description: Probes are not allowed for ephemeral containers. - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action involving a GRPC - port. - properties: - port: - description: Port number of the gRPC service. Number - must be in the range 1 to 65535. 
- format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. 
- format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect to, - defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. 
- More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - stdin: - description: |- - Whether this container should allocate a buffer for stdin in the container runtime. If this - is not set, reads from stdin in the container will always result in EOF. - Default is false. - type: boolean - stdinOnce: - description: |- - Whether the container runtime should close the stdin channel after it has been opened by - a single attach. When stdin is true the stdin stream will remain open across multiple attach - sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the - first client attaches to stdin, and then remains open and accepts data until the client disconnects, - at which time stdin is closed and remains closed until the container is restarted. If this - flag is false, a container processes that reads from stdin will never receive an EOF. - Default is false - type: boolean - targetContainerName: - description: |- - If set, the name of the container from PodSpec that this ephemeral container targets. - The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. - If not set then the ephemeral container uses the namespaces configured in the Pod spec. - - - The container runtime must implement support for this feature. If the runtime does not - support namespace targeting then the result of setting this field is undefined. - type: string - terminationMessagePath: - description: |- - Optional: Path at which the file to which the container's termination message - will be written is mounted into the container's filesystem. - Message written is intended to be brief final status, such as an assertion failure message. - Will be truncated by the node if greater than 4096 bytes. The total message length across - all containers will be limited to 12kb. - Defaults to /dev/termination-log. - Cannot be updated. 
- type: string - terminationMessagePolicy: - description: |- - Indicate how the termination message should be populated. File will use the contents of - terminationMessagePath to populate the container status message on both success and failure. - FallbackToLogsOnError will use the last chunk of container log output if the termination - message file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, whichever is smaller. - Defaults to File. - Cannot be updated. - type: string - tty: - description: |- - Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. - Default is false. - type: boolean - volumeDevices: - description: volumeDevices is the list of block devices - to be used by the container. - items: - description: volumeDevice describes a mapping of a raw - block device within a container. - properties: - devicePath: - description: devicePath is the path inside of the - container that the device will be mapped to. - type: string - name: - description: name must match the name of a persistentVolumeClaim - in the pod - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - description: |- - Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. - Cannot be updated. - items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. 
- type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array - workingDir: - description: |- - Container's working directory. - If not specified, the container runtime's default will be used, which - might be configured in the container image. - Cannot be updated. - type: string - required: - - name - type: object - type: array - hostAliases: - description: |- - HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts - file if specified. This is only valid for non-hostNetwork pods. - items: - description: |- - HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the - pod's hosts file. - properties: - hostnames: - description: Hostnames for the above IP address. - items: - type: string - type: array - ip: - description: IP address of the host file entry. - type: string - type: object - type: array - hostIPC: - description: |- - Use the host's ipc namespace. - Optional: Default to false. - type: boolean - hostNetwork: - description: |- - Host networking requested for this pod. Use the host's network namespace. - If this option is set, the ports that will be used must be specified. - Default to false. - type: boolean - hostPID: - description: |- - Use the host's pid namespace. - Optional: Default to false. 
- type: boolean - hostUsers: - description: |- - Use the host's user namespace. - Optional: Default to true. - If set to true or not present, the pod will be run in the host user namespace, useful - for when the pod needs a feature only available to the host user namespace, such as - loading a kernel module with CAP_SYS_MODULE. - When set to false, a new userns is created for the pod. Setting false is useful for - mitigating container breakout vulnerabilities even allowing users to run their - containers as root without actually having root privileges on the host. - This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. - type: boolean - hostname: - description: |- - Specifies the hostname of the Pod - If not specified, the pod's hostname will be set to a system-defined value. - type: string - imagePullSecrets: - description: |- - ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. - If specified, these secrets will be passed to individual puller implementations for them to use. - More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod - items: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - type: array - initContainers: - description: |- - List of initialization containers belonging to the pod. - Init containers are executed in order prior to containers being started. If any - init container fails, the pod is considered to have failed and is handled according - to its restartPolicy. 
The name for an init container or normal container must be - unique among all containers. - Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. - The resourceRequirements of an init container are taken into account during scheduling - by finding the highest request/limit for each resource type, and then using the max of - of that value or the sum of the normal containers. Limits are applied to init containers - in a similar fashion. - Init containers cannot currently be added or removed. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - items: - description: A single application container that you want to - run within a pod. - properties: - args: - description: |- - Arguments to the entrypoint. - The container image's CMD is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot be updated. - More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - items: - type: string - type: array - command: - description: |- - Entrypoint array. Not executed within a shell. - The container image's ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot be updated. - More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - items: - type: string - type: array - env: - description: |- - List of environment variables to set in the container. - Cannot be updated. - items: - description: EnvVar represents an environment variable - present in a Container. - properties: - name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. - type: string - value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". - type: string - valueFrom: - description: Source for the environment variable's - value. Cannot be used if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? 
- type: string - optional: - description: Specify whether the ConfigMap - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in - the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - properties: - containerName: - description: 'Container name: required for - volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of - the exposed resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in the - pod's namespace - properties: - key: - description: The key of the secret to select - from. Must be a valid secret key. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? 
- type: string - optional: - description: Specify whether the Secret or - its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - envFrom: - description: |- - List of sources to populate environment variables in the container. - The keys defined within a source must be a C_IDENTIFIER. All invalid keys - will be reported as an event when the container is starting. When a key exists in multiple - sources, the value associated with the last source will take precedence. - Values defined by an Env with a duplicate key will take precedence. - Cannot be updated. - items: - description: EnvFromSource represents the source of a - set of ConfigMaps - properties: - configMapRef: - description: The ConfigMap to select from - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the ConfigMap must - be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - prefix: - description: An optional identifier to prepend to - each key in the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the Secret must be - defined - type: boolean - type: object - x-kubernetes-map-type: atomic - type: object - type: array - image: - description: |- - Container image name. 
- More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config management to default or override - container images in workload controllers like Deployments and StatefulSets. - type: string - imagePullPolicy: - description: |- - Image pull policy. - One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/containers/images#updating-images - type: string - lifecycle: - description: |- - Actions that the management system should take in response to container lifecycle events. - Cannot be updated. - properties: - postStart: - description: |- - PostStart is called immediately after a container is created. If the handler fails, - the container is terminated and restarted according to its restart policy. - Other management of the container blocks until the hook completes. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. 
- items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: |- - Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - description: |- - PreStop is called immediately before a container is terminated due to an - API request or management event such as liveness/startup probe failure, - preemption, resource contention, etc. The handler is not called if the - container crashes or exits. The Pod's termination grace period countdown begins before the - PreStop hook is executed. 
Regardless of the outcome of the handler, the - container will eventually terminate within the Pod's termination grace - period (unless delayed by finalizers). Other management of the container blocks until the hook completes - or until the termination grace period is reached. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. 
- x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: |- - Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - description: |- - Periodic probe of container liveness. - Container will be restarted if the probe fails. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action involving a GRPC - port. 
- properties: - port: - description: Port number of the gRPC service. Number - must be in the range 1 to 65535. - format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. 
Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect to, - defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - name: - description: |- - Name of the container specified as a DNS_LABEL. 
- Each container in a pod must have a unique name (DNS_LABEL). - Cannot be updated. - type: string - ports: - description: |- - List of ports to expose from the container. Not specifying a port here - DOES NOT prevent that port from being exposed. Any port which is - listening on the default "0.0.0.0" address inside a container will be - accessible from the network. - Modifying this array with strategic merge patch may corrupt the data. - For more information See https://github.com/kubernetes/kubernetes/issues/108255. - Cannot be updated. - items: - description: ContainerPort represents a network port in - a single container. - properties: - containerPort: - description: |- - Number of port to expose on the pod's IP address. - This must be a valid port number, 0 < x < 65536. - format: int32 - type: integer - hostIP: - description: What host IP to bind the external port - to. - type: string - hostPort: - description: |- - Number of port to expose on the host. - If specified, this must be a valid port number, 0 < x < 65536. - If HostNetwork is specified, this must match ContainerPort. - Most containers do not need this. - format: int32 - type: integer - name: - description: |- - If specified, this must be an IANA_SVC_NAME and unique within the pod. Each - named port in a pod must have a unique name. Name for the port that can be - referred to by services. - type: string - protocol: - default: TCP - description: |- - Protocol for port. Must be UDP, TCP, or SCTP. - Defaults to "TCP". - type: string - required: - - containerPort - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - description: |- - Periodic probe of container service readiness. - Container will be removed from service endpoints if the probe fails. - Cannot be updated. 
- More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action involving a GRPC - port. - properties: - port: - description: Port number of the gRPC service. Number - must be in the range 1 to 65535. - format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. 
- type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect to, - defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
- The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - resizePolicy: - description: Resources resize policy for the container. - items: - description: ContainerResizePolicy represents resource - resize policy for the container. - properties: - resourceName: - description: |- - Name of the resource to which this resource resize policy applies. - Supported values: cpu, memory. - type: string - restartPolicy: - description: |- - Restart policy to apply when specified resource is resized. - If not specified, it defaults to NotRequired. - type: string - required: - - resourceName - - restartPolicy - type: object - type: array - x-kubernetes-list-type: atomic - resources: - description: |- - Compute Resources required by this container. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. 
- - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry in - PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - restartPolicy: - description: |- - RestartPolicy defines the restart behavior of individual containers in a pod. - This field may only be set for init containers, and the only allowed value is "Always". - For non-init containers or when this field is not specified, - the restart behavior is defined by the Pod's restart policy and the container type. 
- Setting the RestartPolicy as "Always" for the init container will have the following effect: - this init container will be continually restarted on - exit until all regular containers have terminated. Once all regular - containers have completed, all init containers with restartPolicy "Always" - will be shut down. This lifecycle differs from normal init containers and - is often referred to as a "sidecar" container. Although this init - container still starts in the init container sequence, it does not wait - for the container to complete before proceeding to the next init - container. Instead, the next init container starts immediately after this - init container is started, or after any startupProbe has successfully - completed. - type: string - securityContext: - description: |- - SecurityContext defines the security options the container should be run with. - If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. - More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - properties: - allowPrivilegeEscalation: - description: |- - AllowPrivilegeEscalation controls whether a process can gain more - privileges than its parent process. This bool directly controls if - the no_new_privs flag will be set on the container process. - AllowPrivilegeEscalation is true always when the container is: - 1) run as Privileged - 2) has CAP_SYS_ADMIN - Note that this field cannot be set when spec.os.name is windows. - type: boolean - capabilities: - description: |- - The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the container runtime. - Note that this field cannot be set when spec.os.name is windows. 
- properties: - add: - description: Added capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - drop: - description: Removed capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - type: object - privileged: - description: |- - Run container in privileged mode. - Processes in privileged containers are essentially equivalent to root on the host. - Defaults to false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - procMount: - description: |- - procMount denotes the type of proc mount to use for the containers. - The default is DefaultProcMount which uses the container runtime defaults for - readonly paths and masked paths. - This requires the ProcMountType feature flag to be enabled. - Note that this field cannot be set when spec.os.name is windows. - type: string - readOnlyRootFilesystem: - description: |- - Whether this container has a read-only root filesystem. - Default is false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - runAsGroup: - description: |- - The GID to run the entrypoint of the container process. - Uses runtime default if unset. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - runAsNonRoot: - description: |- - Indicates that the container must run as a non-root user. - If true, the Kubelet will validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start the container if it does. - If unset or false, no such validation will be performed. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. 
- type: boolean - runAsUser: - description: |- - The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - seLinuxOptions: - description: |- - The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random SELinux context for each - container. May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - properties: - level: - description: Level is SELinux level label that applies - to the container. - type: string - role: - description: Role is a SELinux role label that applies - to the container. - type: string - type: - description: Type is a SELinux type label that applies - to the container. - type: string - user: - description: User is a SELinux user label that applies - to the container. - type: string - type: object - seccompProfile: - description: |- - The seccomp options to use by this container. If seccomp options are - provided at both the pod & container level, the container options - override the pod options. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. - The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. - type: string - type: - description: |- - type indicates which kind of seccomp profile will be applied. 
- Valid options are: - - - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. - type: string - required: - - type - type: object - windowsOptions: - description: |- - The Windows specific settings applied to all containers. - If unspecified, the options from the PodSecurityContext will be used. - If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is linux. - properties: - gmsaCredentialSpec: - description: |- - GMSACredentialSpec is where the GMSA admission webhook - (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - GMSA credential spec named by the GMSACredentialSpecName field. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name - of the GMSA credential spec to use. - type: string - hostProcess: - description: |- - HostProcess determines if a container should be run as a 'Host Process' container. - All of a Pod's containers must have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). - In addition, if HostProcess is true then HostNetwork must also be set to true. - type: boolean - runAsUserName: - description: |- - The UserName in Windows to run the entrypoint of the container process. - Defaults to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: string - type: object - type: object - startupProbe: - description: |- - StartupProbe indicates that the Pod has successfully initialized. - If specified, no other probes are executed until this completes successfully. 
- If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. - This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, - when it might take a long time to load data or warm a cache, than during steady-state operation. - This cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action involving a GRPC - port. - properties: - port: - description: Port number of the gRPC service. Number - must be in the range 1 to 65535. - format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. 
- type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect to, - defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. 
- Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - stdin: - description: |- - Whether this container should allocate a buffer for stdin in the container runtime. If this - is not set, reads from stdin in the container will always result in EOF. - Default is false. - type: boolean - stdinOnce: - description: |- - Whether the container runtime should close the stdin channel after it has been opened by - a single attach. When stdin is true the stdin stream will remain open across multiple attach - sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the - first client attaches to stdin, and then remains open and accepts data until the client disconnects, - at which time stdin is closed and remains closed until the container is restarted. 
If this - flag is false, a container processes that reads from stdin will never receive an EOF. - Default is false - type: boolean - terminationMessagePath: - description: |- - Optional: Path at which the file to which the container's termination message - will be written is mounted into the container's filesystem. - Message written is intended to be brief final status, such as an assertion failure message. - Will be truncated by the node if greater than 4096 bytes. The total message length across - all containers will be limited to 12kb. - Defaults to /dev/termination-log. - Cannot be updated. - type: string - terminationMessagePolicy: - description: |- - Indicate how the termination message should be populated. File will use the contents of - terminationMessagePath to populate the container status message on both success and failure. - FallbackToLogsOnError will use the last chunk of container log output if the termination - message file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, whichever is smaller. - Defaults to File. - Cannot be updated. - type: string - tty: - description: |- - Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. - Default is false. - type: boolean - volumeDevices: - description: volumeDevices is the list of block devices - to be used by the container. - items: - description: volumeDevice describes a mapping of a raw - block device within a container. - properties: - devicePath: - description: devicePath is the path inside of the - container that the device will be mapped to. - type: string - name: - description: name must match the name of a persistentVolumeClaim - in the pod - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - description: |- - Pod volumes to mount into the container's filesystem. - Cannot be updated. 
- items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array - workingDir: - description: |- - Container's working directory. - If not specified, the container runtime's default will be used, which - might be configured in the container image. - Cannot be updated. - type: string - required: - - name - type: object - type: array - nodeName: - description: |- - NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - the scheduler simply schedules this pod onto that node, assuming that it fits resource - requirements. - type: string - nodeSelector: - additionalProperties: - type: string - description: |- - NodeSelector is a selector which must be true for the pod to fit on a node. 
- Selector which must match a node's labels for the pod to be scheduled on that node. - More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - type: object - x-kubernetes-map-type: atomic - os: - description: |- - Specifies the OS of the containers in the pod. - Some pod and container fields are restricted if this is set. - - - If the OS field is set to linux, the following fields must be unset: - -securityContext.windowsOptions - - - If the OS field is set to windows, following fields must be unset: - - spec.hostPID - - spec.hostIPC - - spec.hostUsers - - spec.securityContext.seLinuxOptions - - spec.securityContext.seccompProfile - - spec.securityContext.fsGroup - - spec.securityContext.fsGroupChangePolicy - - spec.securityContext.sysctls - - spec.shareProcessNamespace - - spec.securityContext.runAsUser - - spec.securityContext.runAsGroup - - spec.securityContext.supplementalGroups - - spec.containers[*].securityContext.seLinuxOptions - - spec.containers[*].securityContext.seccompProfile - - spec.containers[*].securityContext.capabilities - - spec.containers[*].securityContext.readOnlyRootFilesystem - - spec.containers[*].securityContext.privileged - - spec.containers[*].securityContext.allowPrivilegeEscalation - - spec.containers[*].securityContext.procMount - - spec.containers[*].securityContext.runAsUser - - spec.containers[*].securityContext.runAsGroup - properties: - name: - description: |- - Name is the name of the operating system. The currently supported values are linux and windows. 
- Additional value may be defined in future and can be one of: - https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration - Clients should expect to handle additional values and treat unrecognized values in this field as os: null - type: string - required: - - name - type: object - overhead: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. - This field will be autopopulated at admission time by the RuntimeClass admission controller. If - the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. - The RuntimeClass admission controller will reject Pod create requests which have the overhead already - set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value - defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. - More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md - type: object - preemptionPolicy: - description: |- - PreemptionPolicy is the Policy for preempting pods with lower priority. - One of Never, PreemptLowerPriority. - Defaults to PreemptLowerPriority if unset. - type: string - priority: - description: |- - The priority value. Various system components use this field to find the - priority of the pod. When Priority Admission Controller is enabled, it - prevents users from setting this field. The admission controller populates - this field from PriorityClassName. - The higher the value, the higher the priority. - format: int32 - type: integer - priorityClassName: - description: |- - If specified, indicates the pod's priority. 
"system-node-critical" and - "system-cluster-critical" are two special keywords which indicate the - highest priorities with the former being the highest priority. Any other - name must be defined by creating a PriorityClass object with that name. - If not specified, the pod priority will be default or zero if there is no - default. - type: string - readinessGates: - description: |- - If specified, all readiness gates will be evaluated for pod readiness. - A pod is ready when all its containers are ready AND - all conditions specified in the readiness gates have status equal to "True" - More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates - items: - description: PodReadinessGate contains the reference to a pod - condition - properties: - conditionType: - description: ConditionType refers to a condition in the - pod's condition list with matching type. - type: string - required: - - conditionType - type: object - type: array - resourceClaims: - description: |- - ResourceClaims defines which ResourceClaims must be allocated - and reserved before the Pod is allowed to start. The resources - will be made available to those containers which consume them - by name. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. - items: - description: |- - PodResourceClaim references exactly one ResourceClaim through a ClaimSource. - It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. - Containers that need access to the ResourceClaim reference it with this name. - properties: - name: - description: |- - Name uniquely identifies this resource claim inside the pod. - This must be a DNS_LABEL. - type: string - source: - description: Source describes where to find the ResourceClaim. - properties: - resourceClaimName: - description: |- - ResourceClaimName is the name of a ResourceClaim object in the same - namespace as this pod. 
- type: string - resourceClaimTemplateName: - description: |- - ResourceClaimTemplateName is the name of a ResourceClaimTemplate - object in the same namespace as this pod. - - - The template will be used to create a new ResourceClaim, which will - be bound to this pod. When this pod is deleted, the ResourceClaim - will also be deleted. The pod name and resource name, along with a - generated component, will be used to form a unique name for the - ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. - - - This field is immutable and no changes will be made to the - corresponding ResourceClaim by the control plane after creating the - ResourceClaim. - type: string - type: object - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - restartPolicy: - description: |- - Restart policy for all containers within the pod. - One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. - Default to Always. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy - type: string - runtimeClassName: - description: |- - RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used - to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. - If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an - empty definition that uses the default runtime handler. - More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class - type: string - schedulerName: - description: |- - If specified, the pod will be dispatched by specified scheduler. - If not specified, the pod will be dispatched by default scheduler. - type: string - schedulingGates: - description: |- - SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
- If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the - scheduler will not attempt to schedule the pod. - - - SchedulingGates can only be set at pod creation time, and be removed only afterwards. - - - This is a beta feature enabled by the PodSchedulingReadiness feature gate. - items: - description: PodSchedulingGate is associated to a Pod to guard - its scheduling. - properties: - name: - description: |- - Name of the scheduling gate. - Each scheduling gate must have a unique name field. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - securityContext: - description: |- - SecurityContext holds pod-level security attributes and common container settings. - Optional: Defaults to empty. See type description for default values of each field. - properties: - fsGroup: - description: |- - A special supplemental group that applies to all containers in a pod. - Some volume types allow the Kubelet to change the ownership of that volume - to be owned by the pod: - - - 1. The owning GID will be the FSGroup - 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) - 3. The permission bits are OR'd with rw-rw---- - - - If unset, the Kubelet will not modify the ownership and permissions of any volume. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - fsGroupChangePolicy: - description: |- - fsGroupChangePolicy defines behavior of changing ownership and permission of the volume - before being exposed inside Pod. This field will only apply to - volume types which support fsGroup based ownership(and permissions). - It will have no effect on ephemeral volume types such as: secret, configmaps - and emptydir. - Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. - Note that this field cannot be set when spec.os.name is windows. 
- type: string - runAsGroup: - description: |- - The GID to run the entrypoint of the container process. - Uses runtime default if unset. - May also be set in SecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence - for that container. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - runAsNonRoot: - description: |- - Indicates that the container must run as a non-root user. - If true, the Kubelet will validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start the container if it does. - If unset or false, no such validation will be performed. - May also be set in SecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: |- - The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. - May also be set in SecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence - for that container. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - seLinuxOptions: - description: |- - The SELinux context to be applied to all containers. - If unspecified, the container runtime will allocate a random SELinux context for each - container. May also be set in SecurityContext. If set in - both SecurityContext and PodSecurityContext, the value specified in SecurityContext - takes precedence for that container. - Note that this field cannot be set when spec.os.name is windows. - properties: - level: - description: Level is SELinux level label that applies - to the container. - type: string - role: - description: Role is a SELinux role label that applies - to the container. 
- type: string - type: - description: Type is a SELinux type label that applies - to the container. - type: string - user: - description: User is a SELinux user label that applies - to the container. - type: string - type: object - seccompProfile: - description: |- - The seccomp options to use by the containers in this pod. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. - The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. - type: string - type: - description: |- - type indicates which kind of seccomp profile will be applied. - Valid options are: - - - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. - type: string - required: - - type - type: object - supplementalGroups: - description: |- - A list of groups applied to the first process run in each container, in addition - to the container's primary GID, the fsGroup (if specified), and group memberships - defined in the container image for the uid of the container process. If unspecified, - no additional groups are added to any container. Note that group memberships - defined in the container image for the uid of the container process are still effective, - even if they are not included in this list. - Note that this field cannot be set when spec.os.name is windows. - items: - format: int64 - type: integer - type: array - sysctls: - description: |- - Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported - sysctls (by the container runtime) might fail to launch. - Note that this field cannot be set when spec.os.name is windows. 
- items: - description: Sysctl defines a kernel parameter to be set - properties: - name: - description: Name of a property to set - type: string - value: - description: Value of a property to set - type: string - required: - - name - - value - type: object - type: array - windowsOptions: - description: |- - The Windows specific settings applied to all containers. - If unspecified, the options within a container's SecurityContext will be used. - If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is linux. - properties: - gmsaCredentialSpec: - description: |- - GMSACredentialSpec is where the GMSA admission webhook - (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - GMSA credential spec named by the GMSACredentialSpecName field. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the - GMSA credential spec to use. - type: string - hostProcess: - description: |- - HostProcess determines if a container should be run as a 'Host Process' container. - All of a Pod's containers must have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). - In addition, if HostProcess is true then HostNetwork must also be set to true. - type: boolean - runAsUserName: - description: |- - The UserName in Windows to run the entrypoint of the container process. - Defaults to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: string - type: object - type: object - serviceAccount: - description: |- - DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. - Deprecated: Use serviceAccountName instead. 
- type: string - serviceAccountName: - description: |- - ServiceAccountName is the name of the ServiceAccount to use to run this pod. - More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - type: string - setHostnameAsFQDN: - description: |- - If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). - In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). - In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. - If a pod does not have FQDN, this has no effect. - Default to false. - type: boolean - shareProcessNamespace: - description: |- - Share a single process namespace between all of the containers in a pod. - When this is set containers will be able to view and signal processes from other containers - in the same pod, and the first process in each container will not be assigned PID 1. - HostPID and ShareProcessNamespace cannot both be set. - Optional: Default to false. - type: boolean - subdomain: - description: |- - If specified, the fully qualified Pod hostname will be "...svc.". - If not specified, the pod will not have a domainname at all. - type: string - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - If this value is nil, the default grace period will be used instead. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. 
- Defaults to 30 seconds. - format: int64 - type: integer - tolerations: - description: If specified, the pod's tolerations. - items: - description: |- - The pod this Toleration is attached to tolerates any taint that matches - the triple using the matching operator . - properties: - effect: - description: |- - Effect indicates the taint effect to match. Empty means match all taint effects. - When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: |- - Key is the taint key that the toleration applies to. Empty means match all taint keys. - If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: |- - Operator represents a key's relationship to the value. - Valid operators are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, so that a pod can - tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: |- - TolerationSeconds represents the period of time the toleration (which must be - of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, - it is not set, which means tolerate the taint forever (do not evict). Zero and - negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: |- - Value is the taint value the toleration matches to. - If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologySpreadConstraints: - description: |- - TopologySpreadConstraints describes how a group of pods ought to spread across topology - domains. Scheduler will schedule pods in a way which abides by the constraints. - All topologySpreadConstraints are ANDed. - items: - description: TopologySpreadConstraint specifies how to spread - matching pods among the given topology. 
- properties: - labelSelector: - description: |- - LabelSelector is used to find matching pods. - Pods that match this label selector are counted to determine the number of pods - in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select the pods over which - spreading will be calculated. The keys are used to lookup values from the - incoming pod labels, those key-value labels are ANDed with labelSelector - to select the group of existing pods over which spreading will be calculated - for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. 
- MatchLabelKeys cannot be set when LabelSelector isn't set. - Keys that don't exist in the incoming pod labels will - be ignored. A null or empty list means only match against labelSelector. - - - This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - maxSkew: - description: |- - MaxSkew describes the degree to which pods may be unevenly distributed. - When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference - between the number of matching pods in the target topology and the global minimum. - The global minimum is the minimum number of matching pods in an eligible domain - or zero if the number of eligible domains is less than MinDomains. - For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - labelSelector spread as 2/2/1: - In this case, the global minimum is 1. - | zone1 | zone2 | zone3 | - | P P | P P | P | - - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; - scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) - violate MaxSkew(1). - - if MaxSkew is 2, incoming pod can be scheduled onto any zone. - When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence - to topologies that satisfy it. - It's a required field. Default value is 1 and 0 is not allowed. - format: int32 - type: integer - minDomains: - description: |- - MinDomains indicates a minimum number of eligible domains. - When the number of eligible domains with matching topology keys is less than minDomains, - Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. - And when the number of eligible domains with matching topology keys equals or greater than minDomains, - this value has no effect on scheduling. 
- As a result, when the number of eligible domains is less than minDomains, - scheduler won't schedule more than maxSkew Pods to those domains. - If value is nil, the constraint behaves as if MinDomains is equal to 1. - Valid values are integers greater than 0. - When value is not nil, WhenUnsatisfiable must be DoNotSchedule. - - - For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same - labelSelector spread as 2/2/2: - | zone1 | zone2 | zone3 | - | P P | P P | P P | - The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. - In this situation, new pod with the same labelSelector cannot be scheduled, - because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, - it will violate MaxSkew. - - - This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). - format: int32 - type: integer - nodeAffinityPolicy: - description: |- - NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector - when calculating pod topology spread skew. Options are: - - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. - - - If this value is nil, the behavior is equivalent to the Honor policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. - type: string - nodeTaintsPolicy: - description: |- - NodeTaintsPolicy indicates how we will treat node taints when calculating - pod topology spread skew. Options are: - - Honor: nodes without taints, along with tainted nodes for which the incoming pod - has a toleration, are included. - - Ignore: node taints are ignored. All nodes are included. - - - If this value is nil, the behavior is equivalent to the Ignore policy. 
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. - type: string - topologyKey: - description: |- - TopologyKey is the key of node labels. Nodes that have a label with this key - and identical values are considered to be in the same topology. - We consider each as a "bucket", and try to put balanced number - of pods into each bucket. - We define a domain as a particular instance of a topology. - Also, we define an eligible domain as a domain whose nodes meet the requirements of - nodeAffinityPolicy and nodeTaintsPolicy. - e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. - And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. - It's a required field. - type: string - whenUnsatisfiable: - description: |- - WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy - the spread constraint. - - DoNotSchedule (default) tells the scheduler not to schedule it. - - ScheduleAnyway tells the scheduler to schedule the pod in any location, - but giving higher precedence to topologies that would help reduce the - skew. - A constraint is considered "Unsatisfiable" for an incoming pod - if and only if every possible node assignment for that pod would violate - "MaxSkew" on some topology. - For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - labelSelector spread as 3/1/1: - | zone1 | zone2 | zone3 | - | P P P | P | P | - If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled - to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies - MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler - won't make it *more* imbalanced. - It's a required field. 
- type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - x-kubernetes-list-map-keys: - - topologyKey - - whenUnsatisfiable - x-kubernetes-list-type: map - volumes: - description: |- - List of volumes that can be mounted by containers belonging to the pod. - More info: https://kubernetes.io/docs/concepts/storage/volumes - items: - description: Volume represents a named volume in a pod that - may be accessed by any container in the pod. - properties: - awsElasticBlockStore: - description: |- - awsElasticBlockStore represents an AWS Disk resource that is attached to a - kubelet's host machine and then exposed to the pod. - More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - properties: - fsType: - description: |- - fsType is the filesystem type of the volume that you want to mount. - Tip: Ensure that the filesystem type is supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - TODO: how do we prevent errors in the filesystem from compromising the machine - type: string - partition: - description: |- - partition is the partition in the volume that you want to mount. - If omitted, the default is to mount by volume name. - Examples: For volume /dev/sda1, you specify the partition as "1". - Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - format: int32 - type: integer - readOnly: - description: |- - readOnly value true will force the readOnly setting in VolumeMounts. - More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - type: boolean - volumeID: - description: |- - volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
- More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - type: string - required: - - volumeID - type: object - azureDisk: - description: azureDisk represents an Azure Data Disk mount - on the host and bind mount to the pod. - properties: - cachingMode: - description: 'cachingMode is the Host Caching mode: - None, Read Only, Read Write.' - type: string - diskName: - description: diskName is the Name of the data disk in - the blob storage - type: string - diskURI: - description: diskURI is the URI of data disk in the - blob storage - type: string - fsType: - description: |- - fsType is Filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - kind: - description: 'kind expected values are Shared: multiple - blob disks per storage account Dedicated: single - blob disk per storage account Managed: azure managed - data disk (only in managed availability set). defaults - to shared' - type: string - readOnly: - description: |- - readOnly Defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - required: - - diskName - - diskURI - type: object - azureFile: - description: azureFile represents an Azure File Service - mount on the host and bind mount to the pod. - properties: - readOnly: - description: |- - readOnly defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. 
- type: boolean - secretName: - description: secretName is the name of secret that - contains Azure Storage Account Name and Key - type: string - shareName: - description: shareName is the azure share Name - type: string - required: - - secretName - - shareName - type: object - cephfs: - description: cephFS represents a Ceph FS mount on the host - that shares a pod's lifetime - properties: - monitors: - description: |- - monitors is Required: Monitors is a collection of Ceph monitors - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - items: - type: string - type: array - path: - description: 'path is Optional: Used as the mounted - root, rather than the full Ceph tree, default is /' - type: string - readOnly: - description: |- - readOnly is Optional: Defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - type: boolean - secretFile: - description: |- - secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - type: string - secretRef: - description: |- - secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? 
- type: string - type: object - x-kubernetes-map-type: atomic - user: - description: |- - user is optional: User is the rados user name, default is admin - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - type: string - required: - - monitors - type: object - cinder: - description: |- - cinder represents a cinder volume attached and mounted on kubelets host machine. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md - properties: - fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md - type: string - readOnly: - description: |- - readOnly defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md - type: boolean - secretRef: - description: |- - secretRef is optional: points to a secret object containing parameters used to connect - to OpenStack. - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - volumeID: - description: |- - volumeID used to identify the volume in cinder. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md - type: string - required: - - volumeID - type: object - configMap: - description: configMap represents a configMap that should - populate this volume - properties: - defaultMode: - description: |- - defaultMode is optional: mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
- YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - Defaults to 0644. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - ConfigMap will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the ConfigMap, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path within a - volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? 
- type: string - optional: - description: optional specify whether the ConfigMap - or its keys must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - csi: - description: csi (Container Storage Interface) represents - ephemeral storage that is handled by certain external - CSI drivers (Beta feature). - properties: - driver: - description: |- - driver is the name of the CSI driver that handles this volume. - Consult with your admin for the correct name as registered in the cluster. - type: string - fsType: - description: |- - fsType to mount. Ex. "ext4", "xfs", "ntfs". - If not provided, the empty value is passed to the associated CSI driver - which will determine the default filesystem to apply. - type: string - nodePublishSecretRef: - description: |- - nodePublishSecretRef is a reference to the secret object containing - sensitive information to pass to the CSI driver to complete the CSI - NodePublishVolume and NodeUnpublishVolume calls. - This field is optional, and may be empty if no secret is required. If the - secret object contains more than one secret, all secret references are passed. - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - readOnly: - description: |- - readOnly specifies a read-only configuration for the volume. - Defaults to false (read/write). - type: boolean - volumeAttributes: - additionalProperties: - type: string - description: |- - volumeAttributes stores driver-specific properties that are passed to the CSI - driver. Consult your driver's documentation for supported values. 
- type: object - required: - - driver - type: object - downwardAPI: - description: downwardAPI represents downward API about the - pod that should populate this volume - properties: - defaultMode: - description: |- - Optional: mode bits to use on created files by default. Must be a - Optional: mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - Defaults to 0644. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - items: - description: Items is a list of downward API volume - file - items: - description: DownwardAPIVolumeFile represents information - to create the file containing the pod field - properties: - fieldRef: - description: 'Required: Selects a field of the - pod: only annotations, labels, name and namespace - are supported.' - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in - the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - mode: - description: |- - Optional: mode bits used to set permissions on this file, must be an octal value - between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. 
- format: int32 - type: integer - path: - description: 'Required: Path is the relative - path name of the file to be created. Must not - be absolute or contain the ''..'' path. Must - be utf-8 encoded. The first item of the relative - path must not start with ''..''' - type: string - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - properties: - containerName: - description: 'Container name: required for - volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of - the exposed resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - required: - - path - type: object - type: array - type: object - emptyDir: - description: |- - emptyDir represents a temporary directory that shares a pod's lifetime. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir - properties: - medium: - description: |- - medium represents what type of storage medium should back this directory. - The default is "" which means to use the node's default medium. - Must be an empty string (default) or Memory. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir - type: string - sizeLimit: - anyOf: - - type: integer - - type: string - description: |- - sizeLimit is the total amount of local storage required for this EmptyDir volume. - The size limit is also applicable for memory medium. 
- The maximum usage on memory medium EmptyDir would be the minimum value between - the SizeLimit specified here and the sum of memory limits of all containers in a pod. - The default is nil which means that the limit is undefined. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - ephemeral: - description: |- - ephemeral represents a volume that is handled by a cluster storage driver. - The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, - and deleted when the pod is removed. - - - Use this if: - a) the volume is only needed while the pod runs, - b) features of normal volumes like restoring from snapshot or capacity - tracking are needed, - c) the storage driver is specified through a storage class, and - d) the storage driver supports dynamic volume provisioning through - a PersistentVolumeClaim (see EphemeralVolumeSource for more - information on the connection between this volume type - and PersistentVolumeClaim). - - - Use PersistentVolumeClaim or one of the vendor-specific - APIs for volumes that persist for longer than the lifecycle - of an individual pod. - - - Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to - be used that way - see the documentation of the driver for - more information. - - - A pod can use both types of ephemeral volumes and - persistent volumes at the same time. - properties: - volumeClaimTemplate: - description: |- - Will be used to create a stand-alone PVC to provision the volume. - The pod in which this EphemeralVolumeSource is embedded will be the - owner of the PVC, i.e. the PVC will be deleted together with the - pod. The name of the PVC will be `-` where - `` is the name from the `PodSpec.Volumes` array - entry. 
Pod validation will reject the pod if the concatenated name - is not valid for a PVC (for example, too long). - - - An existing PVC with that name that is not owned by the pod - will *not* be used for the pod to avoid using an unrelated - volume by mistake. Starting the pod is then blocked until - the unrelated PVC is removed. If such a pre-created PVC is - meant to be used by the pod, the PVC has to updated with an - owner reference to the pod once the pod exists. Normally - this should not be necessary, but it may be useful when - manually reconstructing a broken cluster. - - - This field is read-only and no changes will be made by Kubernetes - to the PVC after it has been created. - - - Required, must not be nil. - properties: - metadata: - description: |- - May contain labels and annotations that will be copied into the PVC - when creating it. No other fields are allowed and will be rejected during - validation. - type: object - spec: - description: |- - The specification for the PersistentVolumeClaim. The entire content is - copied unchanged into the PVC that gets created from this - template. The same fields as in a PersistentVolumeClaim - are also valid here. - properties: - accessModes: - description: |- - accessModes contains the desired access modes the volume should have. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 - items: - type: string - type: array - dataSource: - description: |- - dataSource field can be used to specify either: - * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) - If the provisioner or an external controller can support the specified data source, - it will create a new volume based on the contents of the specified data source. 
- When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, - and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. - If the namespace is specified, then dataSourceRef will not be copied to dataSource. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource - being referenced - type: string - name: - description: Name is the name of resource - being referenced - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - dataSourceRef: - description: |- - dataSourceRef specifies the object from which to populate the volume with data, if a non-empty - volume is desired. This may be any object from a non-empty API group (non - core object) or a PersistentVolumeClaim object. - When this field is specified, volume binding will only succeed if the type of - the specified object matches some installed volume populator or dynamic - provisioner. - This field will replace the functionality of the dataSource field and as such - if both fields are non-empty, they must have the same value. For backwards - compatibility, when namespace isn't specified in dataSourceRef, - both fields (dataSource and dataSourceRef) will be set to the same - value automatically if one of them is empty and the other is non-empty. - When namespace is specified in dataSourceRef, - dataSource isn't set to the same value and must be empty. - There are three important differences between dataSource and dataSourceRef: - * While dataSource only allows two specific types of objects, dataSourceRef - allows any non-core object, as well as PersistentVolumeClaim objects. 
- * While dataSource ignores disallowed values (dropping them), dataSourceRef - preserves all values, and generates an error if a disallowed value is - specified. - * While dataSource only allows local objects, dataSourceRef allows objects - in any namespaces. - (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. - (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource - being referenced - type: string - name: - description: Name is the name of resource - being referenced - type: string - namespace: - description: |- - Namespace is the namespace of resource being referenced - Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. - (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - type: string - required: - - kind - - name - type: object - resources: - description: |- - resources represents the minimum resources the volume should have. - If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements - that are lower than previous value but must still be higher than capacity recorded in the - status field of the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. 
- - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references - one entry in PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - selector: - description: selector is a label query over - volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. 
- items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - storageClassName: - description: |- - storageClassName is the name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 - type: string - volumeMode: - description: |- - volumeMode defines what type of volume is required by the claim. - Value of Filesystem is implied when not included in claim spec. - type: string - volumeName: - description: volumeName is the binding reference - to the PersistentVolume backing this claim. - type: string - type: object - required: - - spec - type: object - type: object - fc: - description: fc represents a Fibre Channel resource that - is attached to a kubelet's host machine and then exposed - to the pod. - properties: - fsType: - description: |- - fsType is the filesystem type to mount. 
- Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - TODO: how do we prevent errors in the filesystem from compromising the machine - type: string - lun: - description: 'lun is Optional: FC target lun number' - format: int32 - type: integer - readOnly: - description: |- - readOnly is Optional: Defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - targetWWNs: - description: 'targetWWNs is Optional: FC target worldwide - names (WWNs)' - items: - type: string - type: array - wwids: - description: |- - wwids Optional: FC volume world wide identifiers (wwids) - Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. - items: - type: string - type: array - type: object - flexVolume: - description: |- - flexVolume represents a generic volume resource that is - provisioned/attached using an exec based plugin. - properties: - driver: - description: driver is the name of the driver to use - for this volume. - type: string - fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. - type: string - options: - additionalProperties: - type: string - description: 'options is Optional: this field holds - extra command options if any.' - type: object - readOnly: - description: |- - readOnly is Optional: defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: |- - secretRef is Optional: secretRef is reference to the secret object containing - sensitive information to pass to the plugin scripts. This may be - empty if no secret object is specified. If the secret object - contains more than one secret, all secrets are passed to the plugin - scripts. 
- properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - required: - - driver - type: object - flocker: - description: flocker represents a Flocker volume attached - to a kubelet's host machine. This depends on the Flocker - control service being running - properties: - datasetName: - description: |- - datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker - should be considered as deprecated - type: string - datasetUUID: - description: datasetUUID is the UUID of the dataset. - This is unique identifier of a Flocker dataset - type: string - type: object - gcePersistentDisk: - description: |- - gcePersistentDisk represents a GCE Disk resource that is attached to a - kubelet's host machine and then exposed to the pod. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - properties: - fsType: - description: |- - fsType is filesystem type of the volume that you want to mount. - Tip: Ensure that the filesystem type is supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - TODO: how do we prevent errors in the filesystem from compromising the machine - type: string - partition: - description: |- - partition is the partition in the volume that you want to mount. - If omitted, the default is to mount by volume name. - Examples: For volume /dev/sda1, you specify the partition as "1". - Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). 
- More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - format: int32 - type: integer - pdName: - description: |- - pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - type: string - readOnly: - description: |- - readOnly here will force the ReadOnly setting in VolumeMounts. - Defaults to false. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - type: boolean - required: - - pdName - type: object - gitRepo: - description: |- - gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an - EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir - into the Pod's container. - properties: - directory: - description: |- - directory is the target directory name. - Must not contain or start with '..'. If '.' is supplied, the volume directory will be the - git repository. Otherwise, if specified, the volume will contain the git repository in - the subdirectory with the given name. - type: string - repository: - description: repository is the URL - type: string - revision: - description: revision is the commit hash for the specified - revision. - type: string - required: - - repository - type: object - glusterfs: - description: |- - glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md - properties: - endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - type: string - path: - description: |- - path is the Glusterfs volume path. 
- More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - type: string - readOnly: - description: |- - readOnly here will force the Glusterfs volume to be mounted with read-only permissions. - Defaults to false. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - type: boolean - required: - - endpoints - - path - type: object - hostPath: - description: |- - hostPath represents a pre-existing file or directory on the host - machine that is directly exposed to the container. This is generally - used for system agents or other privileged things that are allowed - to see the host machine. Most containers will NOT need this. - More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - --- - TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not - mount host directories as read/write. - properties: - path: - description: |- - path of the directory on the host. - If the path is a symlink, it will follow the link to the real path. - More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - type: string - type: - description: |- - type for HostPath Volume - Defaults to "" - More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - type: string - required: - - path - type: object - iscsi: - description: |- - iscsi represents an ISCSI Disk resource that is attached to a - kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md - properties: - chapAuthDiscovery: - description: chapAuthDiscovery defines whether support - iSCSI Discovery CHAP authentication - type: boolean - chapAuthSession: - description: chapAuthSession defines whether support - iSCSI Session CHAP authentication - type: boolean - fsType: - description: |- - fsType is the filesystem type of the volume that you want to mount. - Tip: Ensure that the filesystem type is supported by the host operating system. 
- Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi - TODO: how do we prevent errors in the filesystem from compromising the machine - type: string - initiatorName: - description: |- - initiatorName is the custom iSCSI Initiator Name. - If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface - : will be created for the connection. - type: string - iqn: - description: iqn is the target iSCSI Qualified Name. - type: string - iscsiInterface: - description: |- - iscsiInterface is the interface Name that uses an iSCSI transport. - Defaults to 'default' (tcp). - type: string - lun: - description: lun represents iSCSI Target Lun number. - format: int32 - type: integer - portals: - description: |- - portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port - is other than default (typically TCP ports 860 and 3260). - items: - type: string - type: array - readOnly: - description: |- - readOnly here will force the ReadOnly setting in VolumeMounts. - Defaults to false. - type: boolean - secretRef: - description: secretRef is the CHAP Secret for iSCSI - target and initiator authentication - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - targetPortal: - description: |- - targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port - is other than default (typically TCP ports 860 and 3260). - type: string - required: - - iqn - - lun - - targetPortal - type: object - name: - description: |- - name of the volume. - Must be a DNS_LABEL and unique within the pod. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - nfs: - description: |- - nfs represents an NFS mount on the host that shares a pod's lifetime - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - properties: - path: - description: |- - path that is exported by the NFS server. - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - type: string - readOnly: - description: |- - readOnly here will force the NFS export to be mounted with read-only permissions. - Defaults to false. - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - type: boolean - server: - description: |- - server is the hostname or IP address of the NFS server. - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - type: string - required: - - path - - server - type: object - persistentVolumeClaim: - description: |- - persistentVolumeClaimVolumeSource represents a reference to a - PersistentVolumeClaim in the same namespace. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims - properties: - claimName: - description: |- - claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims - type: string - readOnly: - description: |- - readOnly Will force the ReadOnly setting in VolumeMounts. - Default false. - type: boolean - required: - - claimName - type: object - photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host - machine - properties: - fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
- type: string - pdID: - description: pdID is the ID that identifies Photon Controller - persistent disk - type: string - required: - - pdID - type: object - portworxVolume: - description: portworxVolume represents a portworx volume - attached and mounted on kubelets host machine - properties: - fsType: - description: |- - fSType represents the filesystem type to mount - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. - type: string - readOnly: - description: |- - readOnly defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - volumeID: - description: volumeID uniquely identifies a Portworx - volume - type: string - required: - - volumeID - type: object - projected: - description: projected items for all in one resources secrets, - configmaps, and downward API - properties: - defaultMode: - description: |- - defaultMode are the mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - sources: - description: sources is the list of volume projections - items: - description: Projection that may be projected along - with other supported volume types - properties: - configMap: - description: configMap information about the configMap - data to project - properties: - items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - ConfigMap will be projected into the volume as a file whose name is the - key and content is the value. 
If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the ConfigMap, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path - within a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? 
- type: string - optional: - description: optional specify whether the - ConfigMap or its keys must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - downwardAPI: - description: downwardAPI information about the - downwardAPI data to project - properties: - items: - description: Items is a list of DownwardAPIVolume - file - items: - description: DownwardAPIVolumeFile represents - information to create the file containing - the pod field - properties: - fieldRef: - description: 'Required: Selects a field - of the pod: only annotations, labels, - name and namespace are supported.' - properties: - apiVersion: - description: Version of the schema - the FieldPath is written in terms - of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to - select in the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - mode: - description: |- - Optional: mode bits used to set permissions on this file, must be an octal value - between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: 'Required: Path is the - relative path name of the file to - be created. Must not be absolute or - contain the ''..'' path. Must be utf-8 - encoded. The first item of the relative - path must not start with ''..''' - type: string - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
- properties: - containerName: - description: 'Container name: required - for volumes, optional for env - vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output - format of the exposed resources, - defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource - to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - required: - - path - type: object - type: array - type: object - secret: - description: secret information about the secret - data to project - properties: - items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - Secret will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the Secret, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path - within a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. 
- May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: optional field specify whether - the Secret or its key must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - serviceAccountToken: - description: serviceAccountToken is information - about the serviceAccountToken data to project - properties: - audience: - description: |- - audience is the intended audience of the token. A recipient of a token - must identify itself with an identifier specified in the audience of the - token, and otherwise should reject the token. The audience defaults to the - identifier of the apiserver. - type: string - expirationSeconds: - description: |- - expirationSeconds is the requested duration of validity of the service - account token. As the token approaches expiration, the kubelet volume - plugin will proactively rotate the service account token. The kubelet will - start trying to rotate the token if the token is older than 80 percent of - its time to live or if the token is older than 24 hours.Defaults to 1 hour - and must be at least 10 minutes. - format: int64 - type: integer - path: - description: |- - path is the path relative to the mount point of the file to project the - token into. 
- type: string - required: - - path - type: object - type: object - type: array - type: object - quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime - properties: - group: - description: |- - group to map volume access to - Default is no group - type: string - readOnly: - description: |- - readOnly here will force the Quobyte volume to be mounted with read-only permissions. - Defaults to false. - type: boolean - registry: - description: |- - registry represents a single or multiple Quobyte Registry services - specified as a string as host:port pair (multiple entries are separated with commas) - which acts as the central registry for volumes - type: string - tenant: - description: |- - tenant owning the given Quobyte volume in the Backend - Used with dynamically provisioned Quobyte volumes, value is set by the plugin - type: string - user: - description: |- - user to map volume access to - Defaults to serivceaccount user - type: string - volume: - description: volume is a string that references an already - created Quobyte volume by name. - type: string - required: - - registry - - volume - type: object - rbd: - description: |- - rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md - properties: - fsType: - description: |- - fsType is the filesystem type of the volume that you want to mount. - Tip: Ensure that the filesystem type is supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd - TODO: how do we prevent errors in the filesystem from compromising the machine - type: string - image: - description: |- - image is the rados image name. 
- More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - type: string - keyring: - description: |- - keyring is the path to key ring for RBDUser. - Default is /etc/ceph/keyring. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - type: string - monitors: - description: |- - monitors is a collection of Ceph monitors. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - items: - type: string - type: array - pool: - description: |- - pool is the rados pool name. - Default is rbd. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - type: string - readOnly: - description: |- - readOnly here will force the ReadOnly setting in VolumeMounts. - Defaults to false. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - type: boolean - secretRef: - description: |- - secretRef is name of the authentication secret for RBDUser. If provided - overrides keyring. - Default is nil. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - user: - description: |- - user is the rados user name. - Default is admin. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - type: string - required: - - image - - monitors - type: object - scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. - properties: - fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". - Default is "xfs". - type: string - gateway: - description: gateway is the host address of the ScaleIO - API Gateway. 
- type: string - protectionDomain: - description: protectionDomain is the name of the ScaleIO - Protection Domain for the configured storage. - type: string - readOnly: - description: |- - readOnly Defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: |- - secretRef references to the secret for ScaleIO user and other - sensitive information. If this is not provided, Login operation will fail. - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - sslEnabled: - description: sslEnabled Flag enable/disable SSL communication - with Gateway, default false - type: boolean - storageMode: - description: |- - storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. - Default is ThinProvisioned. - type: string - storagePool: - description: storagePool is the ScaleIO Storage Pool - associated with the protection domain. - type: string - system: - description: system is the name of the storage system - as configured in ScaleIO. - type: string - volumeName: - description: |- - volumeName is the name of a volume already created in the ScaleIO system - that is associated with this volume source. - type: string - required: - - gateway - - secretRef - - system - type: object - secret: - description: |- - secret represents a secret that should populate this volume. - More info: https://kubernetes.io/docs/concepts/storage/volumes#secret - properties: - defaultMode: - description: |- - defaultMode is Optional: mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
- YAML accepts both octal and decimal values, JSON requires decimal values - for mode bits. Defaults to 0644. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - items: - description: |- - items If unspecified, each key-value pair in the Data field of the referenced - Secret will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the Secret, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path within a - volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - optional: - description: optional field specify whether the Secret - or its keys must be defined - type: boolean - secretName: - description: |- - secretName is the name of the secret in the pod's namespace to use. 
- More info: https://kubernetes.io/docs/concepts/storage/volumes#secret - type: string - type: object - storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. - properties: - fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - readOnly: - description: |- - readOnly defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: |- - secretRef specifies the secret to use for obtaining the StorageOS API - credentials. If not specified, default values will be attempted. - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - volumeName: - description: |- - volumeName is the human-readable name of the StorageOS volume. Volume - names are only unique within a namespace. - type: string - volumeNamespace: - description: |- - volumeNamespace specifies the scope of the volume within StorageOS. If no - namespace is specified then the Pod's namespace will be used. This allows the - Kubernetes name scoping to be mirrored within StorageOS for tighter integration. - Set VolumeName to any name to override the default behaviour. - Set to "default" if you are not using namespaces within StorageOS. - Namespaces that do not pre-exist within StorageOS will be created. - type: string - type: object - vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine - properties: - fsType: - description: |- - fsType is filesystem type to mount. - Must be a filesystem type supported by the host operating system. 
- Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - storagePolicyID: - description: storagePolicyID is the storage Policy Based - Management (SPBM) profile ID associated with the StoragePolicyName. - type: string - storagePolicyName: - description: storagePolicyName is the storage Policy - Based Management (SPBM) profile name. - type: string - volumePath: - description: volumePath is the path that identifies - vSphere volume vmdk - type: string - required: - - volumePath - type: object - required: - - name - type: object - type: array - required: - - containers - type: object - storages: - description: List of possible filesystems supported by this container - profile - items: - description: |- - NnfContainerProfileStorage defines the mount point information that will be available to the - container - properties: - name: - description: 'Name specifies the name of the mounted filesystem; - must match the user supplied #DW directive' - type: string - optional: - default: false - description: |- - Optional designates that this filesystem is available to be mounted, but can be ignored by - the user not supplying this filesystem in the #DW directives - type: boolean - pvcMode: - description: |- - For DW_GLOBAL_ (global lustre) storages, the access mode must match what is configured in - the LustreFilesystem resource for the namespace. Defaults to `ReadWriteMany` for global - lustre, otherwise empty. - type: string - required: - - name - - optional - type: object - type: array - userID: - description: |- - UserID specifies the user ID that is allowed to use this profile. If this is specified, only - Workflows that have a matching user ID can select this profile. - format: int32 - type: integer - required: - - retryLimit - type: object - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. 
- Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - required: - - data - type: object - served: false - storage: false - name: v1alpha2 schema: openAPIV3Schema: diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementmanagers.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementmanagers.yaml index 6a3503f1..692dffab 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementmanagers.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementmanagers.yaml @@ -14,7384 +14,6 @@ spec: singular: nnfdatamovementmanager scope: Namespaced versions: - - additionalPrinterColumns: - - description: True if manager readied all resoures - jsonPath: .status.ready - name: READY - type: boolean - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: NnfDataMovementManager is the Schema for the nnfdatamovementmanagers - API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: NnfDataMovementManagerSpec defines the desired state of NnfDataMovementManager - properties: - hostPath: - description: Host Path defines the directory location of shared mounts - on an individual worker node. - type: string - mountPath: - description: Mount Path defines the location within the container - at which the Host Path volume should be mounted. - type: string - selector: - description: |- - Selector defines the pod selector used in scheduling the worker nodes. This value is duplicated - to the template.spec.metadata.labels to satisfy the requirements of the worker's Daemon Set. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector applies - to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - template: - description: |- - Template defines the pod template that is used for the basis of the worker Daemon Set that - manages the per node data movement operations. - properties: - metadata: - description: |- - Standard object's metadata. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - type: object - spec: - description: |- - Specification of the desired behavior of the pod. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - activeDeadlineSeconds: - description: |- - Optional duration in seconds the pod may be active on the node relative to - StartTime before the system will actively try to mark it failed and kill associated containers. - Value must be a positive integer. - format: int64 - type: integer - affinity: - description: If specified, the pod's scheduling constraints - properties: - nodeAffinity: - description: Describes node affinity scheduling rules - for the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. 
- for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node matches the corresponding matchExpressions; the - node(s) with the highest sum are the most preferred. - items: - description: |- - An empty preferred scheduling term matches all objects with implicit weight 0 - (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated - with the corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the - selector applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the - selector applies to. 
- type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - x-kubernetes-map-type: atomic - weight: - description: Weight associated with matching - the corresponding nodeSelectorTerm, in the - range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to an update), the system - may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector - terms. The terms are ORed. - items: - description: |- - A null or empty node selector term matches no objects. The requirements of - them are ANDed. - The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the - selector applies to. 
- type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the - selector applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - x-kubernetes-map-type: atomic - type: array - required: - - nodeSelectorTerms - type: object - x-kubernetes-map-type: atomic - type: object - podAffinity: - description: Describes pod affinity scheduling rules (e.g. - co-locate this pod in the same node, zone, etc. as some - other pod(s)). 
- properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred - node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, - associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of - resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label - key that the selector applies - to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label - key that the selector applies - to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. 
- items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. 
- The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
- items: - type: string - type: array - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling rules - (e.g. avoid putting this pod in the same node, zone, - etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the anti-affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred - node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, - associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of - resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. 
- items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label - key that the selector applies - to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label - key that the selector applies - to. 
- type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. 
- format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the anti-affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the anti-affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - automountServiceAccountToken: - description: AutomountServiceAccountToken indicates whether - a service account token should be automatically mounted. - type: boolean - containers: - description: |- - List of containers belonging to the pod. - Containers cannot currently be added or removed. - There must be at least one container in a Pod. - Cannot be updated. - items: - description: A single application container that you want - to run within a pod. - properties: - args: - description: |- - Arguments to the entrypoint. - The container image's CMD is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. 
"$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot be updated. - More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - items: - type: string - type: array - command: - description: |- - Entrypoint array. Not executed within a shell. - The container image's ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot be updated. - More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - items: - type: string - type: array - env: - description: |- - List of environment variables to set in the container. - Cannot be updated. - items: - description: EnvVar represents an environment variable - present in a Container. - properties: - name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. - type: string - value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". 
- type: string - valueFrom: - description: Source for the environment variable's - value. Cannot be used if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the ConfigMap - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - properties: - apiVersion: - description: Version of the schema the - FieldPath is written in terms of, defaults - to "v1". - type: string - fieldPath: - description: Path of the field to select - in the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
- properties: - containerName: - description: 'Container name: required - for volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format - of the exposed resources, defaults to - "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in - the pod's namespace - properties: - key: - description: The key of the secret to - select from. Must be a valid secret - key. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the Secret - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - envFrom: - description: |- - List of sources to populate environment variables in the container. - The keys defined within a source must be a C_IDENTIFIER. All invalid keys - will be reported as an event when the container is starting. When a key exists in multiple - sources, the value associated with the last source will take precedence. - Values defined by an Env with a duplicate key will take precedence. - Cannot be updated. - items: - description: EnvFromSource represents the source of - a set of ConfigMaps - properties: - configMapRef: - description: The ConfigMap to select from - properties: - name: - description: |- - Name of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the ConfigMap - must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - prefix: - description: An optional identifier to prepend - to each key in the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the Secret must - be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - type: object - type: array - image: - description: |- - Container image name. - More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config management to default or override - container images in workload controllers like Deployments and StatefulSets. - type: string - imagePullPolicy: - description: |- - Image pull policy. - One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/containers/images#updating-images - type: string - lifecycle: - description: |- - Actions that the management system should take in response to container lifecycle events. - Cannot be updated. - properties: - postStart: - description: |- - PostStart is called immediately after a container is created. If the handler fails, - the container is terminated and restarted according to its restart policy. - Other management of the container blocks until the hook completes. 
- More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the - request. HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP - server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: |- - Deprecated. 
TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - description: |- - PreStop is called immediately before a container is terminated due to an - API request or management event such as liveness/startup probe failure, - preemption, resource contention, etc. The handler is not called if the - container crashes or exits. The Pod's termination grace period countdown begins before the - PreStop hook is executed. Regardless of the outcome of the handler, the - container will eventually terminate within the Pod's termination grace - period (unless delayed by finalizers). Other management of the container blocks until the hook completes - or until the termination grace period is reached. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
- items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the - request. HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP - server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: |- - Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - description: |- - Periodic probe of container liveness. 
- Container will be restarted if the probe fails. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action involving - a GRPC port. - properties: - port: - description: Port number of the gRPC service. - Number must be in the range 1 to 65535. - format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. 
- This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
- The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - name: - description: |- - Name of the container specified as a DNS_LABEL. - Each container in a pod must have a unique name (DNS_LABEL). - Cannot be updated. - type: string - ports: - description: |- - List of ports to expose from the container. Not specifying a port here - DOES NOT prevent that port from being exposed. Any port which is - listening on the default "0.0.0.0" address inside a container will be - accessible from the network. - Modifying this array with strategic merge patch may corrupt the data. - For more information See https://github.com/kubernetes/kubernetes/issues/108255. - Cannot be updated. - items: - description: ContainerPort represents a network port - in a single container. - properties: - containerPort: - description: |- - Number of port to expose on the pod's IP address. - This must be a valid port number, 0 < x < 65536. - format: int32 - type: integer - hostIP: - description: What host IP to bind the external - port to. 
- type: string - hostPort: - description: |- - Number of port to expose on the host. - If specified, this must be a valid port number, 0 < x < 65536. - If HostNetwork is specified, this must match ContainerPort. - Most containers do not need this. - format: int32 - type: integer - name: - description: |- - If specified, this must be an IANA_SVC_NAME and unique within the pod. Each - named port in a pod must have a unique name. Name for the port that can be - referred to by services. - type: string - protocol: - default: TCP - description: |- - Protocol for port. Must be UDP, TCP, or SCTP. - Defaults to "TCP". - type: string - required: - - containerPort - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - description: |- - Periodic probe of container service readiness. - Container will be removed from service endpoints if the probe fails. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action involving - a GRPC port. - properties: - port: - description: Port number of the gRPC service. 
- Number must be in the range 1 to 65535. - format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. 
- format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - resizePolicy: - description: Resources resize policy for the container. 
- items: - description: ContainerResizePolicy represents resource - resize policy for the container. - properties: - resourceName: - description: |- - Name of the resource to which this resource resize policy applies. - Supported values: cpu, memory. - type: string - restartPolicy: - description: |- - Restart policy to apply when specified resource is resized. - If not specified, it defaults to NotRequired. - type: string - required: - - resourceName - - restartPolicy - type: object - type: array - x-kubernetes-list-type: atomic - resources: - description: |- - Compute Resources required by this container. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry - in PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. 
- More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - restartPolicy: - description: |- - RestartPolicy defines the restart behavior of individual containers in a pod. - This field may only be set for init containers, and the only allowed value is "Always". - For non-init containers or when this field is not specified, - the restart behavior is defined by the Pod's restart policy and the container type. - Setting the RestartPolicy as "Always" for the init container will have the following effect: - this init container will be continually restarted on - exit until all regular containers have terminated. Once all regular - containers have completed, all init containers with restartPolicy "Always" - will be shut down. This lifecycle differs from normal init containers and - is often referred to as a "sidecar" container. Although this init - container still starts in the init container sequence, it does not wait - for the container to complete before proceeding to the next init - container. Instead, the next init container starts immediately after this - init container is started, or after any startupProbe has successfully - completed. - type: string - securityContext: - description: |- - SecurityContext defines the security options the container should be run with. 
- If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. - More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - properties: - allowPrivilegeEscalation: - description: |- - AllowPrivilegeEscalation controls whether a process can gain more - privileges than its parent process. This bool directly controls if - the no_new_privs flag will be set on the container process. - AllowPrivilegeEscalation is true always when the container is: - 1) run as Privileged - 2) has CAP_SYS_ADMIN - Note that this field cannot be set when spec.os.name is windows. - type: boolean - capabilities: - description: |- - The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the container runtime. - Note that this field cannot be set when spec.os.name is windows. - properties: - add: - description: Added capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - drop: - description: Removed capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - type: object - privileged: - description: |- - Run container in privileged mode. - Processes in privileged containers are essentially equivalent to root on the host. - Defaults to false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - procMount: - description: |- - procMount denotes the type of proc mount to use for the containers. - The default is DefaultProcMount which uses the container runtime defaults for - readonly paths and masked paths. - This requires the ProcMountType feature flag to be enabled. - Note that this field cannot be set when spec.os.name is windows. - type: string - readOnlyRootFilesystem: - description: |- - Whether this container has a read-only root filesystem. - Default is false. - Note that this field cannot be set when spec.os.name is windows. 
- type: boolean - runAsGroup: - description: |- - The GID to run the entrypoint of the container process. - Uses runtime default if unset. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - runAsNonRoot: - description: |- - Indicates that the container must run as a non-root user. - If true, the Kubelet will validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start the container if it does. - If unset or false, no such validation will be performed. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: |- - The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - seLinuxOptions: - description: |- - The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random SELinux context for each - container. May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - properties: - level: - description: Level is SELinux level label that - applies to the container. - type: string - role: - description: Role is a SELinux role label that - applies to the container. - type: string - type: - description: Type is a SELinux type label that - applies to the container. 
- type: string - user: - description: User is a SELinux user label that - applies to the container. - type: string - type: object - seccompProfile: - description: |- - The seccomp options to use by this container. If seccomp options are - provided at both the pod & container level, the container options - override the pod options. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. - The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. - type: string - type: - description: |- - type indicates which kind of seccomp profile will be applied. - Valid options are: - - - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. - type: string - required: - - type - type: object - windowsOptions: - description: |- - The Windows specific settings applied to all containers. - If unspecified, the options from the PodSecurityContext will be used. - If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is linux. - properties: - gmsaCredentialSpec: - description: |- - GMSACredentialSpec is where the GMSA admission webhook - (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - GMSA credential spec named by the GMSACredentialSpecName field. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name - of the GMSA credential spec to use. - type: string - hostProcess: - description: |- - HostProcess determines if a container should be run as a 'Host Process' container. 
- All of a Pod's containers must have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). - In addition, if HostProcess is true then HostNetwork must also be set to true. - type: boolean - runAsUserName: - description: |- - The UserName in Windows to run the entrypoint of the container process. - Defaults to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: string - type: object - type: object - startupProbe: - description: |- - StartupProbe indicates that the Pod has successfully initialized. - If specified, no other probes are executed until this completes successfully. - If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. - This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, - when it might take a long time to load data or warm a cache, than during steady-state operation. - This cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. 
- format: int32 - type: integer - grpc: - description: GRPC specifies an action involving - a GRPC port. - properties: - port: - description: Port number of the gRPC service. - Number must be in the range 1 to 65535. - format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. 
- More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. 
Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - stdin: - description: |- - Whether this container should allocate a buffer for stdin in the container runtime. If this - is not set, reads from stdin in the container will always result in EOF. - Default is false. - type: boolean - stdinOnce: - description: |- - Whether the container runtime should close the stdin channel after it has been opened by - a single attach. When stdin is true the stdin stream will remain open across multiple attach - sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the - first client attaches to stdin, and then remains open and accepts data until the client disconnects, - at which time stdin is closed and remains closed until the container is restarted. If this - flag is false, a container processes that reads from stdin will never receive an EOF. - Default is false - type: boolean - terminationMessagePath: - description: |- - Optional: Path at which the file to which the container's termination message - will be written is mounted into the container's filesystem. - Message written is intended to be brief final status, such as an assertion failure message. - Will be truncated by the node if greater than 4096 bytes. The total message length across - all containers will be limited to 12kb. - Defaults to /dev/termination-log. - Cannot be updated. - type: string - terminationMessagePolicy: - description: |- - Indicate how the termination message should be populated. File will use the contents of - terminationMessagePath to populate the container status message on both success and failure. - FallbackToLogsOnError will use the last chunk of container log output if the termination - message file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, whichever is smaller. - Defaults to File. 
- Cannot be updated. - type: string - tty: - description: |- - Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. - Default is false. - type: boolean - volumeDevices: - description: volumeDevices is the list of block devices - to be used by the container. - items: - description: volumeDevice describes a mapping of a - raw block device within a container. - properties: - devicePath: - description: devicePath is the path inside of - the container that the device will be mapped - to. - type: string - name: - description: name must match the name of a persistentVolumeClaim - in the pod - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - description: |- - Pod volumes to mount into the container's filesystem. - Cannot be updated. - items: - description: VolumeMount describes a mounting of a - Volume within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). 
- SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array - workingDir: - description: |- - Container's working directory. - If not specified, the container runtime's default will be used, which - might be configured in the container image. - Cannot be updated. - type: string - required: - - name - type: object - type: array - dnsConfig: - description: |- - Specifies the DNS parameters of a pod. - Parameters specified here will be merged to the generated DNS - configuration based on DNSPolicy. - properties: - nameservers: - description: |- - A list of DNS name server IP addresses. - This will be appended to the base nameservers generated from DNSPolicy. - Duplicated nameservers will be removed. - items: - type: string - type: array - options: - description: |- - A list of DNS resolver options. - This will be merged with the base options generated from DNSPolicy. - Duplicated entries will be removed. Resolution options given in Options - will override those that appear in the base DNSPolicy. - items: - description: PodDNSConfigOption defines DNS resolver - options of a pod. - properties: - name: - description: Required. - type: string - value: - type: string - type: object - type: array - searches: - description: |- - A list of DNS search domains for host-name lookup. - This will be appended to the base search paths generated from DNSPolicy. - Duplicated search paths will be removed. - items: - type: string - type: array - type: object - dnsPolicy: - description: |- - Set DNS policy for the pod. - Defaults to "ClusterFirst". - Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. - DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. - To have DNS options set along with hostNetwork, you have to specify DNS policy - explicitly to 'ClusterFirstWithHostNet'. 
- type: string - enableServiceLinks: - description: |- - EnableServiceLinks indicates whether information about services should be injected into pod's - environment variables, matching the syntax of Docker links. - Optional: Defaults to true. - type: boolean - ephemeralContainers: - description: |- - List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing - pod to perform user-initiated actions such as debugging. This list cannot be specified when - creating a pod, and it cannot be modified by updating the pod spec. In order to add an - ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. - items: - description: |- - An EphemeralContainer is a temporary container that you may add to an existing Pod for - user-initiated activities such as debugging. Ephemeral containers have no resource or - scheduling guarantees, and they will not be restarted when they exit or when a Pod is - removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the - Pod to exceed its resource allocation. - - - To add an ephemeral container, use the ephemeralcontainers subresource of an existing - Pod. Ephemeral containers may not be removed or restarted. - properties: - args: - description: |- - Arguments to the entrypoint. - The image's CMD is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot be updated. 
- More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - items: - type: string - type: array - command: - description: |- - Entrypoint array. Not executed within a shell. - The image's ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot be updated. - More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - items: - type: string - type: array - env: - description: |- - List of environment variables to set in the container. - Cannot be updated. - items: - description: EnvVar represents an environment variable - present in a Container. - properties: - name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. - type: string - value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". - type: string - valueFrom: - description: Source for the environment variable's - value. Cannot be used if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. 
- properties: - key: - description: The key to select. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the ConfigMap - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - properties: - apiVersion: - description: Version of the schema the - FieldPath is written in terms of, defaults - to "v1". - type: string - fieldPath: - description: Path of the field to select - in the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - properties: - containerName: - description: 'Container name: required - for volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format - of the exposed resources, defaults to - "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in - the pod's namespace - properties: - key: - description: The key of the secret to - select from. 
Must be a valid secret - key. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the Secret - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - envFrom: - description: |- - List of sources to populate environment variables in the container. - The keys defined within a source must be a C_IDENTIFIER. All invalid keys - will be reported as an event when the container is starting. When a key exists in multiple - sources, the value associated with the last source will take precedence. - Values defined by an Env with a duplicate key will take precedence. - Cannot be updated. - items: - description: EnvFromSource represents the source of - a set of ConfigMaps - properties: - configMapRef: - description: The ConfigMap to select from - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the ConfigMap - must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - prefix: - description: An optional identifier to prepend - to each key in the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? 
- type: string - optional: - description: Specify whether the Secret must - be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - type: object - type: array - image: - description: |- - Container image name. - More info: https://kubernetes.io/docs/concepts/containers/images - type: string - imagePullPolicy: - description: |- - Image pull policy. - One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/containers/images#updating-images - type: string - lifecycle: - description: Lifecycle is not allowed for ephemeral - containers. - properties: - postStart: - description: |- - PostStart is called immediately after a container is created. If the handler fails, - the container is terminated and restarted according to its restart policy. - Other management of the container blocks until the hook completes. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the - request. HTTP allows repeated headers. 
- items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP - server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: |- - Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - description: |- - PreStop is called immediately before a container is terminated due to an - API request or management event such as liveness/startup probe failure, - preemption, resource contention, etc. The handler is not called if the - container crashes or exits. The Pod's termination grace period countdown begins before the - PreStop hook is executed. 
Regardless of the outcome of the handler, the - container will eventually terminate within the Pod's termination grace - period (unless delayed by finalizers). Other management of the container blocks until the hook completes - or until the termination grace period is reached. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the - request. HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP - server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. 
- x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: |- - Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - description: Probes are not allowed for ephemeral containers. - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action involving - a GRPC port. - properties: - port: - description: Port number of the gRPC service. - Number must be in the range 1 to 65535. 
- format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. 
- format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - name: - description: |- - Name of the ephemeral container specified as a DNS_LABEL. 
- This name must be unique among all containers, init containers and ephemeral containers. - type: string - ports: - description: Ports are not allowed for ephemeral containers. - items: - description: ContainerPort represents a network port - in a single container. - properties: - containerPort: - description: |- - Number of port to expose on the pod's IP address. - This must be a valid port number, 0 < x < 65536. - format: int32 - type: integer - hostIP: - description: What host IP to bind the external - port to. - type: string - hostPort: - description: |- - Number of port to expose on the host. - If specified, this must be a valid port number, 0 < x < 65536. - If HostNetwork is specified, this must match ContainerPort. - Most containers do not need this. - format: int32 - type: integer - name: - description: |- - If specified, this must be an IANA_SVC_NAME and unique within the pod. Each - named port in a pod must have a unique name. Name for the port that can be - referred to by services. - type: string - protocol: - default: TCP - description: |- - Protocol for port. Must be UDP, TCP, or SCTP. - Defaults to "TCP". - type: string - required: - - containerPort - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - description: Probes are not allowed for ephemeral containers. - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
- items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action involving - a GRPC port. - properties: - port: - description: Port number of the gRPC service. - Number must be in the range 1 to 65535. - format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. 
- type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. 
spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - resizePolicy: - description: Resources resize policy for the container. - items: - description: ContainerResizePolicy represents resource - resize policy for the container. - properties: - resourceName: - description: |- - Name of the resource to which this resource resize policy applies. - Supported values: cpu, memory. - type: string - restartPolicy: - description: |- - Restart policy to apply when specified resource is resized. - If not specified, it defaults to NotRequired. - type: string - required: - - resourceName - - restartPolicy - type: object - type: array - x-kubernetes-list-type: atomic - resources: - description: |- - Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources - already allocated to the pod. - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry - in PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. 
- type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - restartPolicy: - description: |- - Restart policy for the container to manage the restart behavior of each - container within a pod. - This may only be set for init containers. You cannot set this field on - ephemeral containers. - type: string - securityContext: - description: |- - Optional: SecurityContext defines the security options the ephemeral container should be run with. - If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. - properties: - allowPrivilegeEscalation: - description: |- - AllowPrivilegeEscalation controls whether a process can gain more - privileges than its parent process. This bool directly controls if - the no_new_privs flag will be set on the container process. 
- AllowPrivilegeEscalation is true always when the container is: - 1) run as Privileged - 2) has CAP_SYS_ADMIN - Note that this field cannot be set when spec.os.name is windows. - type: boolean - capabilities: - description: |- - The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the container runtime. - Note that this field cannot be set when spec.os.name is windows. - properties: - add: - description: Added capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - drop: - description: Removed capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - type: object - privileged: - description: |- - Run container in privileged mode. - Processes in privileged containers are essentially equivalent to root on the host. - Defaults to false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - procMount: - description: |- - procMount denotes the type of proc mount to use for the containers. - The default is DefaultProcMount which uses the container runtime defaults for - readonly paths and masked paths. - This requires the ProcMountType feature flag to be enabled. - Note that this field cannot be set when spec.os.name is windows. - type: string - readOnlyRootFilesystem: - description: |- - Whether this container has a read-only root filesystem. - Default is false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - runAsGroup: - description: |- - The GID to run the entrypoint of the container process. - Uses runtime default if unset. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. 
- format: int64 - type: integer - runAsNonRoot: - description: |- - Indicates that the container must run as a non-root user. - If true, the Kubelet will validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start the container if it does. - If unset or false, no such validation will be performed. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: |- - The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - seLinuxOptions: - description: |- - The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random SELinux context for each - container. May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - properties: - level: - description: Level is SELinux level label that - applies to the container. - type: string - role: - description: Role is a SELinux role label that - applies to the container. - type: string - type: - description: Type is a SELinux type label that - applies to the container. - type: string - user: - description: User is a SELinux user label that - applies to the container. - type: string - type: object - seccompProfile: - description: |- - The seccomp options to use by this container. If seccomp options are - provided at both the pod & container level, the container options - override the pod options. 
- Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. - The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. - type: string - type: - description: |- - type indicates which kind of seccomp profile will be applied. - Valid options are: - - - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. - type: string - required: - - type - type: object - windowsOptions: - description: |- - The Windows specific settings applied to all containers. - If unspecified, the options from the PodSecurityContext will be used. - If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is linux. - properties: - gmsaCredentialSpec: - description: |- - GMSACredentialSpec is where the GMSA admission webhook - (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - GMSA credential spec named by the GMSACredentialSpecName field. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name - of the GMSA credential spec to use. - type: string - hostProcess: - description: |- - HostProcess determines if a container should be run as a 'Host Process' container. - All of a Pod's containers must have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). - In addition, if HostProcess is true then HostNetwork must also be set to true. 
- type: boolean - runAsUserName: - description: |- - The UserName in Windows to run the entrypoint of the container process. - Defaults to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: string - type: object - type: object - startupProbe: - description: Probes are not allowed for ephemeral containers. - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action involving - a GRPC port. - properties: - port: - description: Port number of the gRPC service. - Number must be in the range 1 to 65535. - format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. 
- type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. 
- Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - stdin: - description: |- - Whether this container should allocate a buffer for stdin in the container runtime. If this - is not set, reads from stdin in the container will always result in EOF. - Default is false. - type: boolean - stdinOnce: - description: |- - Whether the container runtime should close the stdin channel after it has been opened by - a single attach. When stdin is true the stdin stream will remain open across multiple attach - sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the - first client attaches to stdin, and then remains open and accepts data until the client disconnects, - at which time stdin is closed and remains closed until the container is restarted. 
If this - flag is false, a container processes that reads from stdin will never receive an EOF. - Default is false - type: boolean - targetContainerName: - description: |- - If set, the name of the container from PodSpec that this ephemeral container targets. - The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. - If not set then the ephemeral container uses the namespaces configured in the Pod spec. - - - The container runtime must implement support for this feature. If the runtime does not - support namespace targeting then the result of setting this field is undefined. - type: string - terminationMessagePath: - description: |- - Optional: Path at which the file to which the container's termination message - will be written is mounted into the container's filesystem. - Message written is intended to be brief final status, such as an assertion failure message. - Will be truncated by the node if greater than 4096 bytes. The total message length across - all containers will be limited to 12kb. - Defaults to /dev/termination-log. - Cannot be updated. - type: string - terminationMessagePolicy: - description: |- - Indicate how the termination message should be populated. File will use the contents of - terminationMessagePath to populate the container status message on both success and failure. - FallbackToLogsOnError will use the last chunk of container log output if the termination - message file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, whichever is smaller. - Defaults to File. - Cannot be updated. - type: string - tty: - description: |- - Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. - Default is false. - type: boolean - volumeDevices: - description: volumeDevices is the list of block devices - to be used by the container. - items: - description: volumeDevice describes a mapping of a - raw block device within a container. 
- properties: - devicePath: - description: devicePath is the path inside of - the container that the device will be mapped - to. - type: string - name: - description: name must match the name of a persistentVolumeClaim - in the pod - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - description: |- - Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. - Cannot be updated. - items: - description: VolumeMount describes a mounting of a - Volume within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array - workingDir: - description: |- - Container's working directory. - If not specified, the container runtime's default will be used, which - might be configured in the container image. - Cannot be updated. 
- type: string - required: - - name - type: object - type: array - hostAliases: - description: |- - HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts - file if specified. This is only valid for non-hostNetwork pods. - items: - description: |- - HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the - pod's hosts file. - properties: - hostnames: - description: Hostnames for the above IP address. - items: - type: string - type: array - ip: - description: IP address of the host file entry. - type: string - type: object - type: array - hostIPC: - description: |- - Use the host's ipc namespace. - Optional: Default to false. - type: boolean - hostNetwork: - description: |- - Host networking requested for this pod. Use the host's network namespace. - If this option is set, the ports that will be used must be specified. - Default to false. - type: boolean - hostPID: - description: |- - Use the host's pid namespace. - Optional: Default to false. - type: boolean - hostUsers: - description: |- - Use the host's user namespace. - Optional: Default to true. - If set to true or not present, the pod will be run in the host user namespace, useful - for when the pod needs a feature only available to the host user namespace, such as - loading a kernel module with CAP_SYS_MODULE. - When set to false, a new userns is created for the pod. Setting false is useful for - mitigating container breakout vulnerabilities even allowing users to run their - containers as root without actually having root privileges on the host. - This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. - type: boolean - hostname: - description: |- - Specifies the hostname of the Pod - If not specified, the pod's hostname will be set to a system-defined value. 
- type: string - imagePullSecrets: - description: |- - ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. - If specified, these secrets will be passed to individual puller implementations for them to use. - More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod - items: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - type: array - initContainers: - description: |- - List of initialization containers belonging to the pod. - Init containers are executed in order prior to containers being started. If any - init container fails, the pod is considered to have failed and is handled according - to its restartPolicy. The name for an init container or normal container must be - unique among all containers. - Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. - The resourceRequirements of an init container are taken into account during scheduling - by finding the highest request/limit for each resource type, and then using the max of - of that value or the sum of the normal containers. Limits are applied to init containers - in a similar fashion. - Init containers cannot currently be added or removed. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - items: - description: A single application container that you want - to run within a pod. - properties: - args: - description: |- - Arguments to the entrypoint. - The container image's CMD is used if this is not provided. 
- Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot be updated. - More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - items: - type: string - type: array - command: - description: |- - Entrypoint array. Not executed within a shell. - The container image's ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot be updated. - More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - items: - type: string - type: array - env: - description: |- - List of environment variables to set in the container. - Cannot be updated. - items: - description: EnvVar represents an environment variable - present in a Container. - properties: - name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. - type: string - value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. 
Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". - type: string - valueFrom: - description: Source for the environment variable's - value. Cannot be used if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the ConfigMap - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - properties: - apiVersion: - description: Version of the schema the - FieldPath is written in terms of, defaults - to "v1". - type: string - fieldPath: - description: Path of the field to select - in the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
- properties: - containerName: - description: 'Container name: required - for volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format - of the exposed resources, defaults to - "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in - the pod's namespace - properties: - key: - description: The key of the secret to - select from. Must be a valid secret - key. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the Secret - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - envFrom: - description: |- - List of sources to populate environment variables in the container. - The keys defined within a source must be a C_IDENTIFIER. All invalid keys - will be reported as an event when the container is starting. When a key exists in multiple - sources, the value associated with the last source will take precedence. - Values defined by an Env with a duplicate key will take precedence. - Cannot be updated. - items: - description: EnvFromSource represents the source of - a set of ConfigMaps - properties: - configMapRef: - description: The ConfigMap to select from - properties: - name: - description: |- - Name of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the ConfigMap - must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - prefix: - description: An optional identifier to prepend - to each key in the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the Secret must - be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - type: object - type: array - image: - description: |- - Container image name. - More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config management to default or override - container images in workload controllers like Deployments and StatefulSets. - type: string - imagePullPolicy: - description: |- - Image pull policy. - One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/containers/images#updating-images - type: string - lifecycle: - description: |- - Actions that the management system should take in response to container lifecycle events. - Cannot be updated. - properties: - postStart: - description: |- - PostStart is called immediately after a container is created. If the handler fails, - the container is terminated and restarted according to its restart policy. - Other management of the container blocks until the hook completes. 
- More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the - request. HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP - server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: |- - Deprecated. 
TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - description: |- - PreStop is called immediately before a container is terminated due to an - API request or management event such as liveness/startup probe failure, - preemption, resource contention, etc. The handler is not called if the - container crashes or exits. The Pod's termination grace period countdown begins before the - PreStop hook is executed. Regardless of the outcome of the handler, the - container will eventually terminate within the Pod's termination grace - period (unless delayed by finalizers). Other management of the container blocks until the hook completes - or until the termination grace period is reached. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
- items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the - request. HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP - server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: |- - Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - description: |- - Periodic probe of container liveness. 
- Container will be restarted if the probe fails. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action involving - a GRPC port. - properties: - port: - description: Port number of the gRPC service. - Number must be in the range 1 to 65535. - format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. 
- This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
- The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - name: - description: |- - Name of the container specified as a DNS_LABEL. - Each container in a pod must have a unique name (DNS_LABEL). - Cannot be updated. - type: string - ports: - description: |- - List of ports to expose from the container. Not specifying a port here - DOES NOT prevent that port from being exposed. Any port which is - listening on the default "0.0.0.0" address inside a container will be - accessible from the network. - Modifying this array with strategic merge patch may corrupt the data. - For more information See https://github.com/kubernetes/kubernetes/issues/108255. - Cannot be updated. - items: - description: ContainerPort represents a network port - in a single container. - properties: - containerPort: - description: |- - Number of port to expose on the pod's IP address. - This must be a valid port number, 0 < x < 65536. - format: int32 - type: integer - hostIP: - description: What host IP to bind the external - port to. 
- type: string - hostPort: - description: |- - Number of port to expose on the host. - If specified, this must be a valid port number, 0 < x < 65536. - If HostNetwork is specified, this must match ContainerPort. - Most containers do not need this. - format: int32 - type: integer - name: - description: |- - If specified, this must be an IANA_SVC_NAME and unique within the pod. Each - named port in a pod must have a unique name. Name for the port that can be - referred to by services. - type: string - protocol: - default: TCP - description: |- - Protocol for port. Must be UDP, TCP, or SCTP. - Defaults to "TCP". - type: string - required: - - containerPort - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - description: |- - Periodic probe of container service readiness. - Container will be removed from service endpoints if the probe fails. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies an action involving - a GRPC port. - properties: - port: - description: Port number of the gRPC service. 
- Number must be in the range 1 to 65535. - format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. 
- format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - resizePolicy: - description: Resources resize policy for the container. 
- items: - description: ContainerResizePolicy represents resource - resize policy for the container. - properties: - resourceName: - description: |- - Name of the resource to which this resource resize policy applies. - Supported values: cpu, memory. - type: string - restartPolicy: - description: |- - Restart policy to apply when specified resource is resized. - If not specified, it defaults to NotRequired. - type: string - required: - - resourceName - - restartPolicy - type: object - type: array - x-kubernetes-list-type: atomic - resources: - description: |- - Compute Resources required by this container. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry - in PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. 
- More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - restartPolicy: - description: |- - RestartPolicy defines the restart behavior of individual containers in a pod. - This field may only be set for init containers, and the only allowed value is "Always". - For non-init containers or when this field is not specified, - the restart behavior is defined by the Pod's restart policy and the container type. - Setting the RestartPolicy as "Always" for the init container will have the following effect: - this init container will be continually restarted on - exit until all regular containers have terminated. Once all regular - containers have completed, all init containers with restartPolicy "Always" - will be shut down. This lifecycle differs from normal init containers and - is often referred to as a "sidecar" container. Although this init - container still starts in the init container sequence, it does not wait - for the container to complete before proceeding to the next init - container. Instead, the next init container starts immediately after this - init container is started, or after any startupProbe has successfully - completed. - type: string - securityContext: - description: |- - SecurityContext defines the security options the container should be run with. 
- If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. - More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - properties: - allowPrivilegeEscalation: - description: |- - AllowPrivilegeEscalation controls whether a process can gain more - privileges than its parent process. This bool directly controls if - the no_new_privs flag will be set on the container process. - AllowPrivilegeEscalation is true always when the container is: - 1) run as Privileged - 2) has CAP_SYS_ADMIN - Note that this field cannot be set when spec.os.name is windows. - type: boolean - capabilities: - description: |- - The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the container runtime. - Note that this field cannot be set when spec.os.name is windows. - properties: - add: - description: Added capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - drop: - description: Removed capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - type: object - privileged: - description: |- - Run container in privileged mode. - Processes in privileged containers are essentially equivalent to root on the host. - Defaults to false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - procMount: - description: |- - procMount denotes the type of proc mount to use for the containers. - The default is DefaultProcMount which uses the container runtime defaults for - readonly paths and masked paths. - This requires the ProcMountType feature flag to be enabled. - Note that this field cannot be set when spec.os.name is windows. - type: string - readOnlyRootFilesystem: - description: |- - Whether this container has a read-only root filesystem. - Default is false. - Note that this field cannot be set when spec.os.name is windows. 
- type: boolean - runAsGroup: - description: |- - The GID to run the entrypoint of the container process. - Uses runtime default if unset. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - runAsNonRoot: - description: |- - Indicates that the container must run as a non-root user. - If true, the Kubelet will validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start the container if it does. - If unset or false, no such validation will be performed. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: |- - The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - seLinuxOptions: - description: |- - The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random SELinux context for each - container. May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - properties: - level: - description: Level is SELinux level label that - applies to the container. - type: string - role: - description: Role is a SELinux role label that - applies to the container. - type: string - type: - description: Type is a SELinux type label that - applies to the container. 
- type: string - user: - description: User is a SELinux user label that - applies to the container. - type: string - type: object - seccompProfile: - description: |- - The seccomp options to use by this container. If seccomp options are - provided at both the pod & container level, the container options - override the pod options. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. - The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. - type: string - type: - description: |- - type indicates which kind of seccomp profile will be applied. - Valid options are: - - - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. - type: string - required: - - type - type: object - windowsOptions: - description: |- - The Windows specific settings applied to all containers. - If unspecified, the options from the PodSecurityContext will be used. - If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is linux. - properties: - gmsaCredentialSpec: - description: |- - GMSACredentialSpec is where the GMSA admission webhook - (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - GMSA credential spec named by the GMSACredentialSpecName field. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name - of the GMSA credential spec to use. - type: string - hostProcess: - description: |- - HostProcess determines if a container should be run as a 'Host Process' container. 
- All of a Pod's containers must have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). - In addition, if HostProcess is true then HostNetwork must also be set to true. - type: boolean - runAsUserName: - description: |- - The UserName in Windows to run the entrypoint of the container process. - Defaults to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: string - type: object - type: object - startupProbe: - description: |- - StartupProbe indicates that the Pod has successfully initialized. - If specified, no other probes are executed until this completes successfully. - If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. - This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, - when it might take a long time to load data or warm a cache, than during steady-state operation. - This cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. 
- format: int32 - type: integer - grpc: - description: GRPC specifies an action involving - a GRPC port. - properties: - port: - description: Port number of the gRPC service. - Number must be in the range 1 to 65535. - format: int32 - type: integer - service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies the http request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. 
- More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies an action involving - a TCP port. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. 
Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - stdin: - description: |- - Whether this container should allocate a buffer for stdin in the container runtime. If this - is not set, reads from stdin in the container will always result in EOF. - Default is false. - type: boolean - stdinOnce: - description: |- - Whether the container runtime should close the stdin channel after it has been opened by - a single attach. When stdin is true the stdin stream will remain open across multiple attach - sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the - first client attaches to stdin, and then remains open and accepts data until the client disconnects, - at which time stdin is closed and remains closed until the container is restarted. If this - flag is false, a container processes that reads from stdin will never receive an EOF. - Default is false - type: boolean - terminationMessagePath: - description: |- - Optional: Path at which the file to which the container's termination message - will be written is mounted into the container's filesystem. - Message written is intended to be brief final status, such as an assertion failure message. - Will be truncated by the node if greater than 4096 bytes. The total message length across - all containers will be limited to 12kb. - Defaults to /dev/termination-log. - Cannot be updated. - type: string - terminationMessagePolicy: - description: |- - Indicate how the termination message should be populated. File will use the contents of - terminationMessagePath to populate the container status message on both success and failure. - FallbackToLogsOnError will use the last chunk of container log output if the termination - message file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, whichever is smaller. - Defaults to File. 
- Cannot be updated. - type: string - tty: - description: |- - Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. - Default is false. - type: boolean - volumeDevices: - description: volumeDevices is the list of block devices - to be used by the container. - items: - description: volumeDevice describes a mapping of a - raw block device within a container. - properties: - devicePath: - description: devicePath is the path inside of - the container that the device will be mapped - to. - type: string - name: - description: name must match the name of a persistentVolumeClaim - in the pod - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - description: |- - Pod volumes to mount into the container's filesystem. - Cannot be updated. - items: - description: VolumeMount describes a mounting of a - Volume within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). 
- SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array - workingDir: - description: |- - Container's working directory. - If not specified, the container runtime's default will be used, which - might be configured in the container image. - Cannot be updated. - type: string - required: - - name - type: object - type: array - nodeName: - description: |- - NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - the scheduler simply schedules this pod onto that node, assuming that it fits resource - requirements. - type: string - nodeSelector: - additionalProperties: - type: string - description: |- - NodeSelector is a selector which must be true for the pod to fit on a node. - Selector which must match a node's labels for the pod to be scheduled on that node. - More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - type: object - x-kubernetes-map-type: atomic - os: - description: |- - Specifies the OS of the containers in the pod. - Some pod and container fields are restricted if this is set. 
- - - If the OS field is set to linux, the following fields must be unset: - -securityContext.windowsOptions - - - If the OS field is set to windows, following fields must be unset: - - spec.hostPID - - spec.hostIPC - - spec.hostUsers - - spec.securityContext.seLinuxOptions - - spec.securityContext.seccompProfile - - spec.securityContext.fsGroup - - spec.securityContext.fsGroupChangePolicy - - spec.securityContext.sysctls - - spec.shareProcessNamespace - - spec.securityContext.runAsUser - - spec.securityContext.runAsGroup - - spec.securityContext.supplementalGroups - - spec.containers[*].securityContext.seLinuxOptions - - spec.containers[*].securityContext.seccompProfile - - spec.containers[*].securityContext.capabilities - - spec.containers[*].securityContext.readOnlyRootFilesystem - - spec.containers[*].securityContext.privileged - - spec.containers[*].securityContext.allowPrivilegeEscalation - - spec.containers[*].securityContext.procMount - - spec.containers[*].securityContext.runAsUser - - spec.containers[*].securityContext.runAsGroup - properties: - name: - description: |- - Name is the name of the operating system. The currently supported values are linux and windows. - Additional value may be defined in future and can be one of: - https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration - Clients should expect to handle additional values and treat unrecognized values in this field as os: null - type: string - required: - - name - type: object - overhead: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. - This field will be autopopulated at admission time by the RuntimeClass admission controller. 
If - the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. - The RuntimeClass admission controller will reject Pod create requests which have the overhead already - set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value - defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. - More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md - type: object - preemptionPolicy: - description: |- - PreemptionPolicy is the Policy for preempting pods with lower priority. - One of Never, PreemptLowerPriority. - Defaults to PreemptLowerPriority if unset. - type: string - priority: - description: |- - The priority value. Various system components use this field to find the - priority of the pod. When Priority Admission Controller is enabled, it - prevents users from setting this field. The admission controller populates - this field from PriorityClassName. - The higher the value, the higher the priority. - format: int32 - type: integer - priorityClassName: - description: |- - If specified, indicates the pod's priority. "system-node-critical" and - "system-cluster-critical" are two special keywords which indicate the - highest priorities with the former being the highest priority. Any other - name must be defined by creating a PriorityClass object with that name. - If not specified, the pod priority will be default or zero if there is no - default. - type: string - readinessGates: - description: |- - If specified, all readiness gates will be evaluated for pod readiness. 
- A pod is ready when all its containers are ready AND - all conditions specified in the readiness gates have status equal to "True" - More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates - items: - description: PodReadinessGate contains the reference to - a pod condition - properties: - conditionType: - description: ConditionType refers to a condition in - the pod's condition list with matching type. - type: string - required: - - conditionType - type: object - type: array - resourceClaims: - description: |- - ResourceClaims defines which ResourceClaims must be allocated - and reserved before the Pod is allowed to start. The resources - will be made available to those containers which consume them - by name. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. - items: - description: |- - PodResourceClaim references exactly one ResourceClaim through a ClaimSource. - It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. - Containers that need access to the ResourceClaim reference it with this name. - properties: - name: - description: |- - Name uniquely identifies this resource claim inside the pod. - This must be a DNS_LABEL. - type: string - source: - description: Source describes where to find the ResourceClaim. - properties: - resourceClaimName: - description: |- - ResourceClaimName is the name of a ResourceClaim object in the same - namespace as this pod. - type: string - resourceClaimTemplateName: - description: |- - ResourceClaimTemplateName is the name of a ResourceClaimTemplate - object in the same namespace as this pod. - - - The template will be used to create a new ResourceClaim, which will - be bound to this pod. When this pod is deleted, the ResourceClaim - will also be deleted. 
The pod name and resource name, along with a - generated component, will be used to form a unique name for the - ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. - - - This field is immutable and no changes will be made to the - corresponding ResourceClaim by the control plane after creating the - ResourceClaim. - type: string - type: object - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - restartPolicy: - description: |- - Restart policy for all containers within the pod. - One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. - Default to Always. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy - type: string - runtimeClassName: - description: |- - RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used - to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. - If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an - empty definition that uses the default runtime handler. - More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class - type: string - schedulerName: - description: |- - If specified, the pod will be dispatched by specified scheduler. - If not specified, the pod will be dispatched by default scheduler. - type: string - schedulingGates: - description: |- - SchedulingGates is an opaque list of values that if specified will block scheduling the pod. - If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the - scheduler will not attempt to schedule the pod. - - - SchedulingGates can only be set at pod creation time, and be removed only afterwards. - - - This is a beta feature enabled by the PodSchedulingReadiness feature gate. 
- items: - description: PodSchedulingGate is associated to a Pod to - guard its scheduling. - properties: - name: - description: |- - Name of the scheduling gate. - Each scheduling gate must have a unique name field. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - securityContext: - description: |- - SecurityContext holds pod-level security attributes and common container settings. - Optional: Defaults to empty. See type description for default values of each field. - properties: - fsGroup: - description: |- - A special supplemental group that applies to all containers in a pod. - Some volume types allow the Kubelet to change the ownership of that volume - to be owned by the pod: - - - 1. The owning GID will be the FSGroup - 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) - 3. The permission bits are OR'd with rw-rw---- - - - If unset, the Kubelet will not modify the ownership and permissions of any volume. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - fsGroupChangePolicy: - description: |- - fsGroupChangePolicy defines behavior of changing ownership and permission of the volume - before being exposed inside Pod. This field will only apply to - volume types which support fsGroup based ownership(and permissions). - It will have no effect on ephemeral volume types such as: secret, configmaps - and emptydir. - Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. - Note that this field cannot be set when spec.os.name is windows. - type: string - runAsGroup: - description: |- - The GID to run the entrypoint of the container process. - Uses runtime default if unset. - May also be set in SecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence - for that container. 
- Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - runAsNonRoot: - description: |- - Indicates that the container must run as a non-root user. - If true, the Kubelet will validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start the container if it does. - If unset or false, no such validation will be performed. - May also be set in SecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: |- - The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. - May also be set in SecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence - for that container. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - seLinuxOptions: - description: |- - The SELinux context to be applied to all containers. - If unspecified, the container runtime will allocate a random SELinux context for each - container. May also be set in SecurityContext. If set in - both SecurityContext and PodSecurityContext, the value specified in SecurityContext - takes precedence for that container. - Note that this field cannot be set when spec.os.name is windows. - properties: - level: - description: Level is SELinux level label that applies - to the container. - type: string - role: - description: Role is a SELinux role label that applies - to the container. - type: string - type: - description: Type is a SELinux type label that applies - to the container. - type: string - user: - description: User is a SELinux user label that applies - to the container. - type: string - type: object - seccompProfile: - description: |- - The seccomp options to use by the containers in this pod. 
- Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. - The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. - type: string - type: - description: |- - type indicates which kind of seccomp profile will be applied. - Valid options are: - - - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. - type: string - required: - - type - type: object - supplementalGroups: - description: |- - A list of groups applied to the first process run in each container, in addition - to the container's primary GID, the fsGroup (if specified), and group memberships - defined in the container image for the uid of the container process. If unspecified, - no additional groups are added to any container. Note that group memberships - defined in the container image for the uid of the container process are still effective, - even if they are not included in this list. - Note that this field cannot be set when spec.os.name is windows. - items: - format: int64 - type: integer - type: array - sysctls: - description: |- - Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported - sysctls (by the container runtime) might fail to launch. - Note that this field cannot be set when spec.os.name is windows. 
- items: - description: Sysctl defines a kernel parameter to be - set - properties: - name: - description: Name of a property to set - type: string - value: - description: Value of a property to set - type: string - required: - - name - - value - type: object - type: array - windowsOptions: - description: |- - The Windows specific settings applied to all containers. - If unspecified, the options within a container's SecurityContext will be used. - If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is linux. - properties: - gmsaCredentialSpec: - description: |- - GMSACredentialSpec is where the GMSA admission webhook - (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - GMSA credential spec named by the GMSACredentialSpecName field. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of - the GMSA credential spec to use. - type: string - hostProcess: - description: |- - HostProcess determines if a container should be run as a 'Host Process' container. - All of a Pod's containers must have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). - In addition, if HostProcess is true then HostNetwork must also be set to true. - type: boolean - runAsUserName: - description: |- - The UserName in Windows to run the entrypoint of the container process. - Defaults to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: string - type: object - type: object - serviceAccount: - description: |- - DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. - Deprecated: Use serviceAccountName instead. 
- type: string - serviceAccountName: - description: |- - ServiceAccountName is the name of the ServiceAccount to use to run this pod. - More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - type: string - setHostnameAsFQDN: - description: |- - If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). - In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). - In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. - If a pod does not have FQDN, this has no effect. - Default to false. - type: boolean - shareProcessNamespace: - description: |- - Share a single process namespace between all of the containers in a pod. - When this is set containers will be able to view and signal processes from other containers - in the same pod, and the first process in each container will not be assigned PID 1. - HostPID and ShareProcessNamespace cannot both be set. - Optional: Default to false. - type: boolean - subdomain: - description: |- - If specified, the fully qualified Pod hostname will be "...svc.". - If not specified, the pod will not have a domainname at all. - type: string - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - If this value is nil, the default grace period will be used instead. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. 
- Defaults to 30 seconds. - format: int64 - type: integer - tolerations: - description: If specified, the pod's tolerations. - items: - description: |- - The pod this Toleration is attached to tolerates any taint that matches - the triple using the matching operator . - properties: - effect: - description: |- - Effect indicates the taint effect to match. Empty means match all taint effects. - When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: |- - Key is the taint key that the toleration applies to. Empty means match all taint keys. - If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: |- - Operator represents a key's relationship to the value. - Valid operators are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, so that a pod can - tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: |- - TolerationSeconds represents the period of time the toleration (which must be - of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, - it is not set, which means tolerate the taint forever (do not evict). Zero and - negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: |- - Value is the taint value the toleration matches to. - If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologySpreadConstraints: - description: |- - TopologySpreadConstraints describes how a group of pods ought to spread across topology - domains. Scheduler will schedule pods in a way which abides by the constraints. - All topologySpreadConstraints are ANDed. - items: - description: TopologySpreadConstraint specifies how to spread - matching pods among the given topology. 
- properties: - labelSelector: - description: |- - LabelSelector is used to find matching pods. - Pods that match this label selector are counted to determine the number of pods - in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select the pods over which - spreading will be calculated. The keys are used to lookup values from the - incoming pod labels, those key-value labels are ANDed with labelSelector - to select the group of existing pods over which spreading will be calculated - for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. 
- MatchLabelKeys cannot be set when LabelSelector isn't set. - Keys that don't exist in the incoming pod labels will - be ignored. A null or empty list means only match against labelSelector. - - - This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - maxSkew: - description: |- - MaxSkew describes the degree to which pods may be unevenly distributed. - When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference - between the number of matching pods in the target topology and the global minimum. - The global minimum is the minimum number of matching pods in an eligible domain - or zero if the number of eligible domains is less than MinDomains. - For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - labelSelector spread as 2/2/1: - In this case, the global minimum is 1. - | zone1 | zone2 | zone3 | - | P P | P P | P | - - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; - scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) - violate MaxSkew(1). - - if MaxSkew is 2, incoming pod can be scheduled onto any zone. - When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence - to topologies that satisfy it. - It's a required field. Default value is 1 and 0 is not allowed. - format: int32 - type: integer - minDomains: - description: |- - MinDomains indicates a minimum number of eligible domains. - When the number of eligible domains with matching topology keys is less than minDomains, - Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. - And when the number of eligible domains with matching topology keys equals or greater than minDomains, - this value has no effect on scheduling. 
- As a result, when the number of eligible domains is less than minDomains, - scheduler won't schedule more than maxSkew Pods to those domains. - If value is nil, the constraint behaves as if MinDomains is equal to 1. - Valid values are integers greater than 0. - When value is not nil, WhenUnsatisfiable must be DoNotSchedule. - - - For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same - labelSelector spread as 2/2/2: - | zone1 | zone2 | zone3 | - | P P | P P | P P | - The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. - In this situation, new pod with the same labelSelector cannot be scheduled, - because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, - it will violate MaxSkew. - - - This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). - format: int32 - type: integer - nodeAffinityPolicy: - description: |- - NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector - when calculating pod topology spread skew. Options are: - - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. - - - If this value is nil, the behavior is equivalent to the Honor policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. - type: string - nodeTaintsPolicy: - description: |- - NodeTaintsPolicy indicates how we will treat node taints when calculating - pod topology spread skew. Options are: - - Honor: nodes without taints, along with tainted nodes for which the incoming pod - has a toleration, are included. - - Ignore: node taints are ignored. All nodes are included. - - - If this value is nil, the behavior is equivalent to the Ignore policy. 
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. - type: string - topologyKey: - description: |- - TopologyKey is the key of node labels. Nodes that have a label with this key - and identical values are considered to be in the same topology. - We consider each as a "bucket", and try to put balanced number - of pods into each bucket. - We define a domain as a particular instance of a topology. - Also, we define an eligible domain as a domain whose nodes meet the requirements of - nodeAffinityPolicy and nodeTaintsPolicy. - e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. - And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. - It's a required field. - type: string - whenUnsatisfiable: - description: |- - WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy - the spread constraint. - - DoNotSchedule (default) tells the scheduler not to schedule it. - - ScheduleAnyway tells the scheduler to schedule the pod in any location, - but giving higher precedence to topologies that would help reduce the - skew. - A constraint is considered "Unsatisfiable" for an incoming pod - if and only if every possible node assignment for that pod would violate - "MaxSkew" on some topology. - For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - labelSelector spread as 3/1/1: - | zone1 | zone2 | zone3 | - | P P P | P | P | - If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled - to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies - MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler - won't make it *more* imbalanced. - It's a required field. 
- type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - x-kubernetes-list-map-keys: - - topologyKey - - whenUnsatisfiable - x-kubernetes-list-type: map - volumes: - description: |- - List of volumes that can be mounted by containers belonging to the pod. - More info: https://kubernetes.io/docs/concepts/storage/volumes - items: - description: Volume represents a named volume in a pod that - may be accessed by any container in the pod. - properties: - awsElasticBlockStore: - description: |- - awsElasticBlockStore represents an AWS Disk resource that is attached to a - kubelet's host machine and then exposed to the pod. - More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - properties: - fsType: - description: |- - fsType is the filesystem type of the volume that you want to mount. - Tip: Ensure that the filesystem type is supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - TODO: how do we prevent errors in the filesystem from compromising the machine - type: string - partition: - description: |- - partition is the partition in the volume that you want to mount. - If omitted, the default is to mount by volume name. - Examples: For volume /dev/sda1, you specify the partition as "1". - Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - format: int32 - type: integer - readOnly: - description: |- - readOnly value true will force the readOnly setting in VolumeMounts. - More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - type: boolean - volumeID: - description: |- - volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
- More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - type: string - required: - - volumeID - type: object - azureDisk: - description: azureDisk represents an Azure Data Disk - mount on the host and bind mount to the pod. - properties: - cachingMode: - description: 'cachingMode is the Host Caching mode: - None, Read Only, Read Write.' - type: string - diskName: - description: diskName is the Name of the data disk - in the blob storage - type: string - diskURI: - description: diskURI is the URI of data disk in - the blob storage - type: string - fsType: - description: |- - fsType is Filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - kind: - description: 'kind expected values are Shared: multiple - blob disks per storage account Dedicated: single - blob disk per storage account Managed: azure - managed data disk (only in managed availability - set). defaults to shared' - type: string - readOnly: - description: |- - readOnly Defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - required: - - diskName - - diskURI - type: object - azureFile: - description: azureFile represents an Azure File Service - mount on the host and bind mount to the pod. - properties: - readOnly: - description: |- - readOnly defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. 
- type: boolean - secretName: - description: secretName is the name of secret that - contains Azure Storage Account Name and Key - type: string - shareName: - description: shareName is the azure share Name - type: string - required: - - secretName - - shareName - type: object - cephfs: - description: cephFS represents a Ceph FS mount on the - host that shares a pod's lifetime - properties: - monitors: - description: |- - monitors is Required: Monitors is a collection of Ceph monitors - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - items: - type: string - type: array - path: - description: 'path is Optional: Used as the mounted - root, rather than the full Ceph tree, default - is /' - type: string - readOnly: - description: |- - readOnly is Optional: Defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - type: boolean - secretFile: - description: |- - secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - type: string - secretRef: - description: |- - secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? 
- type: string - type: object - x-kubernetes-map-type: atomic - user: - description: |- - user is optional: User is the rados user name, default is admin - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - type: string - required: - - monitors - type: object - cinder: - description: |- - cinder represents a cinder volume attached and mounted on kubelets host machine. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md - properties: - fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md - type: string - readOnly: - description: |- - readOnly defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md - type: boolean - secretRef: - description: |- - secretRef is optional: points to a secret object containing parameters used to connect - to OpenStack. - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - volumeID: - description: |- - volumeID used to identify the volume in cinder. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md - type: string - required: - - volumeID - type: object - configMap: - description: configMap represents a configMap that should - populate this volume - properties: - defaultMode: - description: |- - defaultMode is optional: mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
- YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - Defaults to 0644. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - ConfigMap will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the ConfigMap, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path within - a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? 
- type: string - optional: - description: optional specify whether the ConfigMap - or its keys must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - csi: - description: csi (Container Storage Interface) represents - ephemeral storage that is handled by certain external - CSI drivers (Beta feature). - properties: - driver: - description: |- - driver is the name of the CSI driver that handles this volume. - Consult with your admin for the correct name as registered in the cluster. - type: string - fsType: - description: |- - fsType to mount. Ex. "ext4", "xfs", "ntfs". - If not provided, the empty value is passed to the associated CSI driver - which will determine the default filesystem to apply. - type: string - nodePublishSecretRef: - description: |- - nodePublishSecretRef is a reference to the secret object containing - sensitive information to pass to the CSI driver to complete the CSI - NodePublishVolume and NodeUnpublishVolume calls. - This field is optional, and may be empty if no secret is required. If the - secret object contains more than one secret, all secret references are passed. - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - readOnly: - description: |- - readOnly specifies a read-only configuration for the volume. - Defaults to false (read/write). - type: boolean - volumeAttributes: - additionalProperties: - type: string - description: |- - volumeAttributes stores driver-specific properties that are passed to the CSI - driver. Consult your driver's documentation for supported values. 
- type: object - required: - - driver - type: object - downwardAPI: - description: downwardAPI represents downward API about - the pod that should populate this volume - properties: - defaultMode: - description: |- - Optional: mode bits to use on created files by default. Must be a - Optional: mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - Defaults to 0644. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - items: - description: Items is a list of downward API volume - file - items: - description: DownwardAPIVolumeFile represents - information to create the file containing the - pod field - properties: - fieldRef: - description: 'Required: Selects a field of - the pod: only annotations, labels, name - and namespace are supported.' - properties: - apiVersion: - description: Version of the schema the - FieldPath is written in terms of, defaults - to "v1". - type: string - fieldPath: - description: Path of the field to select - in the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - mode: - description: |- - Optional: mode bits used to set permissions on this file, must be an octal value - between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. 
- format: int32 - type: integer - path: - description: 'Required: Path is the relative - path name of the file to be created. Must - not be absolute or contain the ''..'' path. - Must be utf-8 encoded. The first item of - the relative path must not start with ''..''' - type: string - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - properties: - containerName: - description: 'Container name: required - for volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format - of the exposed resources, defaults to - "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - required: - - path - type: object - type: array - type: object - emptyDir: - description: |- - emptyDir represents a temporary directory that shares a pod's lifetime. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir - properties: - medium: - description: |- - medium represents what type of storage medium should back this directory. - The default is "" which means to use the node's default medium. - Must be an empty string (default) or Memory. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir - type: string - sizeLimit: - anyOf: - - type: integer - - type: string - description: |- - sizeLimit is the total amount of local storage required for this EmptyDir volume. - The size limit is also applicable for memory medium. 
- The maximum usage on memory medium EmptyDir would be the minimum value between - the SizeLimit specified here and the sum of memory limits of all containers in a pod. - The default is nil which means that the limit is undefined. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - ephemeral: - description: |- - ephemeral represents a volume that is handled by a cluster storage driver. - The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, - and deleted when the pod is removed. - - - Use this if: - a) the volume is only needed while the pod runs, - b) features of normal volumes like restoring from snapshot or capacity - tracking are needed, - c) the storage driver is specified through a storage class, and - d) the storage driver supports dynamic volume provisioning through - a PersistentVolumeClaim (see EphemeralVolumeSource for more - information on the connection between this volume type - and PersistentVolumeClaim). - - - Use PersistentVolumeClaim or one of the vendor-specific - APIs for volumes that persist for longer than the lifecycle - of an individual pod. - - - Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to - be used that way - see the documentation of the driver for - more information. - - - A pod can use both types of ephemeral volumes and - persistent volumes at the same time. - properties: - volumeClaimTemplate: - description: |- - Will be used to create a stand-alone PVC to provision the volume. - The pod in which this EphemeralVolumeSource is embedded will be the - owner of the PVC, i.e. the PVC will be deleted together with the - pod. The name of the PVC will be `-` where - `` is the name from the `PodSpec.Volumes` array - entry. 
Pod validation will reject the pod if the concatenated name - is not valid for a PVC (for example, too long). - - - An existing PVC with that name that is not owned by the pod - will *not* be used for the pod to avoid using an unrelated - volume by mistake. Starting the pod is then blocked until - the unrelated PVC is removed. If such a pre-created PVC is - meant to be used by the pod, the PVC has to updated with an - owner reference to the pod once the pod exists. Normally - this should not be necessary, but it may be useful when - manually reconstructing a broken cluster. - - - This field is read-only and no changes will be made by Kubernetes - to the PVC after it has been created. - - - Required, must not be nil. - properties: - metadata: - description: |- - May contain labels and annotations that will be copied into the PVC - when creating it. No other fields are allowed and will be rejected during - validation. - type: object - spec: - description: |- - The specification for the PersistentVolumeClaim. The entire content is - copied unchanged into the PVC that gets created from this - template. The same fields as in a PersistentVolumeClaim - are also valid here. - properties: - accessModes: - description: |- - accessModes contains the desired access modes the volume should have. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 - items: - type: string - type: array - dataSource: - description: |- - dataSource field can be used to specify either: - * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) - If the provisioner or an external controller can support the specified data source, - it will create a new volume based on the contents of the specified data source. 
- When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, - and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. - If the namespace is specified, then dataSourceRef will not be copied to dataSource. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource - being referenced - type: string - name: - description: Name is the name of resource - being referenced - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - dataSourceRef: - description: |- - dataSourceRef specifies the object from which to populate the volume with data, if a non-empty - volume is desired. This may be any object from a non-empty API group (non - core object) or a PersistentVolumeClaim object. - When this field is specified, volume binding will only succeed if the type of - the specified object matches some installed volume populator or dynamic - provisioner. - This field will replace the functionality of the dataSource field and as such - if both fields are non-empty, they must have the same value. For backwards - compatibility, when namespace isn't specified in dataSourceRef, - both fields (dataSource and dataSourceRef) will be set to the same - value automatically if one of them is empty and the other is non-empty. - When namespace is specified in dataSourceRef, - dataSource isn't set to the same value and must be empty. - There are three important differences between dataSource and dataSourceRef: - * While dataSource only allows two specific types of objects, dataSourceRef - allows any non-core object, as well as PersistentVolumeClaim objects. 
- * While dataSource ignores disallowed values (dropping them), dataSourceRef - preserves all values, and generates an error if a disallowed value is - specified. - * While dataSource only allows local objects, dataSourceRef allows objects - in any namespaces. - (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. - (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource - being referenced - type: string - name: - description: Name is the name of resource - being referenced - type: string - namespace: - description: |- - Namespace is the namespace of resource being referenced - Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. - (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - type: string - required: - - kind - - name - type: object - resources: - description: |- - resources represents the minimum resources the volume should have. - If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements - that are lower than previous value but must still be higher than capacity recorded in the - status field of the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. 
- - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references - one entry in PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - selector: - description: selector is a label query over - volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. 
- items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label - key that the selector applies - to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - storageClassName: - description: |- - storageClassName is the name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 - type: string - volumeMode: - description: |- - volumeMode defines what type of volume is required by the claim. - Value of Filesystem is implied when not included in claim spec. - type: string - volumeName: - description: volumeName is the binding reference - to the PersistentVolume backing this claim. - type: string - type: object - required: - - spec - type: object - type: object - fc: - description: fc represents a Fibre Channel resource - that is attached to a kubelet's host machine and then - exposed to the pod. - properties: - fsType: - description: |- - fsType is the filesystem type to mount. 
- Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - TODO: how do we prevent errors in the filesystem from compromising the machine - type: string - lun: - description: 'lun is Optional: FC target lun number' - format: int32 - type: integer - readOnly: - description: |- - readOnly is Optional: Defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - targetWWNs: - description: 'targetWWNs is Optional: FC target - worldwide names (WWNs)' - items: - type: string - type: array - wwids: - description: |- - wwids Optional: FC volume world wide identifiers (wwids) - Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. - items: - type: string - type: array - type: object - flexVolume: - description: |- - flexVolume represents a generic volume resource that is - provisioned/attached using an exec based plugin. - properties: - driver: - description: driver is the name of the driver to - use for this volume. - type: string - fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. - type: string - options: - additionalProperties: - type: string - description: 'options is Optional: this field holds - extra command options if any.' - type: object - readOnly: - description: |- - readOnly is Optional: defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: |- - secretRef is Optional: secretRef is reference to the secret object containing - sensitive information to pass to the plugin scripts. This may be - empty if no secret object is specified. If the secret object - contains more than one secret, all secrets are passed to the plugin - scripts. 
- properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - required: - - driver - type: object - flocker: - description: flocker represents a Flocker volume attached - to a kubelet's host machine. This depends on the Flocker - control service being running - properties: - datasetName: - description: |- - datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker - should be considered as deprecated - type: string - datasetUUID: - description: datasetUUID is the UUID of the dataset. - This is unique identifier of a Flocker dataset - type: string - type: object - gcePersistentDisk: - description: |- - gcePersistentDisk represents a GCE Disk resource that is attached to a - kubelet's host machine and then exposed to the pod. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - properties: - fsType: - description: |- - fsType is filesystem type of the volume that you want to mount. - Tip: Ensure that the filesystem type is supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - TODO: how do we prevent errors in the filesystem from compromising the machine - type: string - partition: - description: |- - partition is the partition in the volume that you want to mount. - If omitted, the default is to mount by volume name. - Examples: For volume /dev/sda1, you specify the partition as "1". - Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). 
- More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - format: int32 - type: integer - pdName: - description: |- - pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - type: string - readOnly: - description: |- - readOnly here will force the ReadOnly setting in VolumeMounts. - Defaults to false. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - type: boolean - required: - - pdName - type: object - gitRepo: - description: |- - gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an - EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir - into the Pod's container. - properties: - directory: - description: |- - directory is the target directory name. - Must not contain or start with '..'. If '.' is supplied, the volume directory will be the - git repository. Otherwise, if specified, the volume will contain the git repository in - the subdirectory with the given name. - type: string - repository: - description: repository is the URL - type: string - revision: - description: revision is the commit hash for the - specified revision. - type: string - required: - - repository - type: object - glusterfs: - description: |- - glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md - properties: - endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - type: string - path: - description: |- - path is the Glusterfs volume path. 
- More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - type: string - readOnly: - description: |- - readOnly here will force the Glusterfs volume to be mounted with read-only permissions. - Defaults to false. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - type: boolean - required: - - endpoints - - path - type: object - hostPath: - description: |- - hostPath represents a pre-existing file or directory on the host - machine that is directly exposed to the container. This is generally - used for system agents or other privileged things that are allowed - to see the host machine. Most containers will NOT need this. - More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - --- - TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not - mount host directories as read/write. - properties: - path: - description: |- - path of the directory on the host. - If the path is a symlink, it will follow the link to the real path. - More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - type: string - type: - description: |- - type for HostPath Volume - Defaults to "" - More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - type: string - required: - - path - type: object - iscsi: - description: |- - iscsi represents an ISCSI Disk resource that is attached to a - kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md - properties: - chapAuthDiscovery: - description: chapAuthDiscovery defines whether support - iSCSI Discovery CHAP authentication - type: boolean - chapAuthSession: - description: chapAuthSession defines whether support - iSCSI Session CHAP authentication - type: boolean - fsType: - description: |- - fsType is the filesystem type of the volume that you want to mount. - Tip: Ensure that the filesystem type is supported by the host operating system. 
- Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi - TODO: how do we prevent errors in the filesystem from compromising the machine - type: string - initiatorName: - description: |- - initiatorName is the custom iSCSI Initiator Name. - If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface - : will be created for the connection. - type: string - iqn: - description: iqn is the target iSCSI Qualified Name. - type: string - iscsiInterface: - description: |- - iscsiInterface is the interface Name that uses an iSCSI transport. - Defaults to 'default' (tcp). - type: string - lun: - description: lun represents iSCSI Target Lun number. - format: int32 - type: integer - portals: - description: |- - portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port - is other than default (typically TCP ports 860 and 3260). - items: - type: string - type: array - readOnly: - description: |- - readOnly here will force the ReadOnly setting in VolumeMounts. - Defaults to false. - type: boolean - secretRef: - description: secretRef is the CHAP Secret for iSCSI - target and initiator authentication - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - targetPortal: - description: |- - targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port - is other than default (typically TCP ports 860 and 3260). - type: string - required: - - iqn - - lun - - targetPortal - type: object - name: - description: |- - name of the volume. - Must be a DNS_LABEL and unique within the pod. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - nfs: - description: |- - nfs represents an NFS mount on the host that shares a pod's lifetime - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - properties: - path: - description: |- - path that is exported by the NFS server. - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - type: string - readOnly: - description: |- - readOnly here will force the NFS export to be mounted with read-only permissions. - Defaults to false. - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - type: boolean - server: - description: |- - server is the hostname or IP address of the NFS server. - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - type: string - required: - - path - - server - type: object - persistentVolumeClaim: - description: |- - persistentVolumeClaimVolumeSource represents a reference to a - PersistentVolumeClaim in the same namespace. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims - properties: - claimName: - description: |- - claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims - type: string - readOnly: - description: |- - readOnly Will force the ReadOnly setting in VolumeMounts. - Default false. - type: boolean - required: - - claimName - type: object - photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host - machine - properties: - fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
- type: string - pdID: - description: pdID is the ID that identifies Photon - Controller persistent disk - type: string - required: - - pdID - type: object - portworxVolume: - description: portworxVolume represents a portworx volume - attached and mounted on kubelets host machine - properties: - fsType: - description: |- - fSType represents the filesystem type to mount - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. - type: string - readOnly: - description: |- - readOnly defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - volumeID: - description: volumeID uniquely identifies a Portworx - volume - type: string - required: - - volumeID - type: object - projected: - description: projected items for all in one resources - secrets, configmaps, and downward API - properties: - defaultMode: - description: |- - defaultMode are the mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - sources: - description: sources is the list of volume projections - items: - description: Projection that may be projected - along with other supported volume types - properties: - configMap: - description: configMap information about the - configMap data to project - properties: - items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - ConfigMap will be projected into the volume as a file whose name is the - key and content is the value. 
If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the ConfigMap, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a - path within a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? 
- type: string - optional: - description: optional specify whether - the ConfigMap or its keys must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - downwardAPI: - description: downwardAPI information about - the downwardAPI data to project - properties: - items: - description: Items is a list of DownwardAPIVolume - file - items: - description: DownwardAPIVolumeFile represents - information to create the file containing - the pod field - properties: - fieldRef: - description: 'Required: Selects - a field of the pod: only annotations, - labels, name and namespace are - supported.' - properties: - apiVersion: - description: Version of the - schema the FieldPath is written - in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field - to select in the specified - API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - mode: - description: |- - Optional: mode bits used to set permissions on this file, must be an octal value - between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: 'Required: Path is the - relative path name of the file - to be created. Must not be absolute - or contain the ''..'' path. Must - be utf-8 encoded. The first item - of the relative path must not - start with ''..''' - type: string - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
- properties: - containerName: - description: 'Container name: - required for volumes, optional - for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output - format of the exposed resources, - defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource - to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - required: - - path - type: object - type: array - type: object - secret: - description: secret information about the - secret data to project - properties: - items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - Secret will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the Secret, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a - path within a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. 
- May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: optional field specify whether - the Secret or its key must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - serviceAccountToken: - description: serviceAccountToken is information - about the serviceAccountToken data to project - properties: - audience: - description: |- - audience is the intended audience of the token. A recipient of a token - must identify itself with an identifier specified in the audience of the - token, and otherwise should reject the token. The audience defaults to the - identifier of the apiserver. - type: string - expirationSeconds: - description: |- - expirationSeconds is the requested duration of validity of the service - account token. As the token approaches expiration, the kubelet volume - plugin will proactively rotate the service account token. The kubelet will - start trying to rotate the token if the token is older than 80 percent of - its time to live or if the token is older than 24 hours.Defaults to 1 hour - and must be at least 10 minutes. - format: int64 - type: integer - path: - description: |- - path is the path relative to the mount point of the file to project the - token into. 
- type: string - required: - - path - type: object - type: object - type: array - type: object - quobyte: - description: quobyte represents a Quobyte mount on the - host that shares a pod's lifetime - properties: - group: - description: |- - group to map volume access to - Default is no group - type: string - readOnly: - description: |- - readOnly here will force the Quobyte volume to be mounted with read-only permissions. - Defaults to false. - type: boolean - registry: - description: |- - registry represents a single or multiple Quobyte Registry services - specified as a string as host:port pair (multiple entries are separated with commas) - which acts as the central registry for volumes - type: string - tenant: - description: |- - tenant owning the given Quobyte volume in the Backend - Used with dynamically provisioned Quobyte volumes, value is set by the plugin - type: string - user: - description: |- - user to map volume access to - Defaults to serivceaccount user - type: string - volume: - description: volume is a string that references - an already created Quobyte volume by name. - type: string - required: - - registry - - volume - type: object - rbd: - description: |- - rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md - properties: - fsType: - description: |- - fsType is the filesystem type of the volume that you want to mount. - Tip: Ensure that the filesystem type is supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd - TODO: how do we prevent errors in the filesystem from compromising the machine - type: string - image: - description: |- - image is the rados image name. 
- More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - type: string - keyring: - description: |- - keyring is the path to key ring for RBDUser. - Default is /etc/ceph/keyring. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - type: string - monitors: - description: |- - monitors is a collection of Ceph monitors. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - items: - type: string - type: array - pool: - description: |- - pool is the rados pool name. - Default is rbd. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - type: string - readOnly: - description: |- - readOnly here will force the ReadOnly setting in VolumeMounts. - Defaults to false. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - type: boolean - secretRef: - description: |- - secretRef is name of the authentication secret for RBDUser. If provided - overrides keyring. - Default is nil. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - user: - description: |- - user is the rados user name. - Default is admin. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - type: string - required: - - image - - monitors - type: object - scaleIO: - description: scaleIO represents a ScaleIO persistent - volume attached and mounted on Kubernetes nodes. - properties: - fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". - Default is "xfs". - type: string - gateway: - description: gateway is the host address of the - ScaleIO API Gateway. 
- type: string - protectionDomain: - description: protectionDomain is the name of the - ScaleIO Protection Domain for the configured storage. - type: string - readOnly: - description: |- - readOnly Defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: |- - secretRef references to the secret for ScaleIO user and other - sensitive information. If this is not provided, Login operation will fail. - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - sslEnabled: - description: sslEnabled Flag enable/disable SSL - communication with Gateway, default false - type: boolean - storageMode: - description: |- - storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. - Default is ThinProvisioned. - type: string - storagePool: - description: storagePool is the ScaleIO Storage - Pool associated with the protection domain. - type: string - system: - description: system is the name of the storage system - as configured in ScaleIO. - type: string - volumeName: - description: |- - volumeName is the name of a volume already created in the ScaleIO system - that is associated with this volume source. - type: string - required: - - gateway - - secretRef - - system - type: object - secret: - description: |- - secret represents a secret that should populate this volume. - More info: https://kubernetes.io/docs/concepts/storage/volumes#secret - properties: - defaultMode: - description: |- - defaultMode is Optional: mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
- YAML accepts both octal and decimal values, JSON requires decimal values - for mode bits. Defaults to 0644. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - items: - description: |- - items If unspecified, each key-value pair in the Data field of the referenced - Secret will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the Secret, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path within - a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - optional: - description: optional field specify whether the - Secret or its keys must be defined - type: boolean - secretName: - description: |- - secretName is the name of the secret in the pod's namespace to use. 
- More info: https://kubernetes.io/docs/concepts/storage/volumes#secret - type: string - type: object - storageos: - description: storageOS represents a StorageOS volume - attached and mounted on Kubernetes nodes. - properties: - fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - readOnly: - description: |- - readOnly defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: |- - secretRef specifies the secret to use for obtaining the StorageOS API - credentials. If not specified, default values will be attempted. - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - volumeName: - description: |- - volumeName is the human-readable name of the StorageOS volume. Volume - names are only unique within a namespace. - type: string - volumeNamespace: - description: |- - volumeNamespace specifies the scope of the volume within StorageOS. If no - namespace is specified then the Pod's namespace will be used. This allows the - Kubernetes name scoping to be mirrored within StorageOS for tighter integration. - Set VolumeName to any name to override the default behaviour. - Set to "default" if you are not using namespaces within StorageOS. - Namespaces that do not pre-exist within StorageOS will be created. - type: string - type: object - vsphereVolume: - description: vsphereVolume represents a vSphere volume - attached and mounted on kubelets host machine - properties: - fsType: - description: |- - fsType is filesystem type to mount. - Must be a filesystem type supported by the host operating system. 
- Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - storagePolicyID: - description: storagePolicyID is the storage Policy - Based Management (SPBM) profile ID associated - with the StoragePolicyName. - type: string - storagePolicyName: - description: storagePolicyName is the storage Policy - Based Management (SPBM) profile name. - type: string - volumePath: - description: volumePath is the path that identifies - vSphere volume vmdk - type: string - required: - - volumePath - type: object - required: - - name - type: object - type: array - required: - - containers - type: object - type: object - updateStrategy: - description: |- - UpdateStrategy defines the UpdateStrategy that is used for the basis of the worker Daemon Set - that manages the per node data movement operations. - properties: - rollingUpdate: - description: |- - Rolling update config params. Present only if type = "RollingUpdate". - --- - TODO: Update this to follow our convention for oneOf, whatever we decide it - to be. Same as Deployment `strategy.rollingUpdate`. - See https://github.com/kubernetes/kubernetes/issues/35345 - properties: - maxSurge: - anyOf: - - type: integer - - type: string - description: |- - The maximum number of nodes with an existing available DaemonSet pod that - can have an updated DaemonSet pod during during an update. - Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - This can not be 0 if MaxUnavailable is 0. - Absolute number is calculated from percentage by rounding up to a minimum of 1. - Default value is 0. - Example: when this is set to 30%, at most 30% of the total number of nodes - that should be running the daemon pod (i.e. status.desiredNumberScheduled) - can have their a new pod created before the old pod is marked as deleted. - The update starts by launching new pods on 30% of nodes. 
Once an updated - pod is available (Ready for at least minReadySeconds) the old DaemonSet pod - on that node is marked deleted. If the old pod becomes unavailable for any - reason (Ready transitions to false, is evicted, or is drained) an updated - pod is immediatedly created on that node without considering surge limits. - Allowing surge implies the possibility that the resources consumed by the - daemonset on any given node can double if the readiness check fails, and - so resource intensive daemonsets should take into account that they may - cause evictions during disruption. - x-kubernetes-int-or-string: true - maxUnavailable: - anyOf: - - type: integer - - type: string - description: |- - The maximum number of DaemonSet pods that can be unavailable during the - update. Value can be an absolute number (ex: 5) or a percentage of total - number of DaemonSet pods at the start of the update (ex: 10%). Absolute - number is calculated from percentage by rounding up. - This cannot be 0 if MaxSurge is 0 - Default value is 1. - Example: when this is set to 30%, at most 30% of the total number of nodes - that should be running the daemon pod (i.e. status.desiredNumberScheduled) - can have their pods stopped for an update at any given time. The update - starts by stopping at most 30% of those DaemonSet pods and then brings - up new DaemonSet pods in their place. Once the new pods are available, - it then proceeds onto other DaemonSet pods, thus ensuring that at least - 70% of original number of DaemonSet pods are available at all times during - the update. - x-kubernetes-int-or-string: true - type: object - type: - description: Type of daemon set update. Can be "RollingUpdate" - or "OnDelete". Default is RollingUpdate. 
- type: string - type: object - required: - - hostPath - - mountPath - - selector - - template - - updateStrategy - type: object - status: - description: NnfDataMovementManagerStatus defines the observed state of - NnfDataMovementManager - properties: - ready: - default: false - description: |- - Ready indicates that the Data Movement Manager has achieved the desired readiness state - and all managed resources are initialized. - type: boolean - required: - - ready - type: object - type: object - served: false - storage: false - subresources: - status: {} - additionalPrinterColumns: - description: True if manager readied all resoures jsonPath: .status.ready diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementprofiles.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementprofiles.yaml index b7ba3232..7e12c44e 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementprofiles.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementprofiles.yaml @@ -14,132 +14,6 @@ spec: singular: nnfdatamovementprofile scope: Namespaced versions: - - additionalPrinterColumns: - - description: True if this is the default instance - jsonPath: .data.default - name: DEFAULT - type: boolean - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: NnfDataMovementProfile is the Schema for the nnfdatamovementprofiles - API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - data: - description: NnfDataMovementProfileData defines the desired state of NnfDataMovementProfile - properties: - command: - default: ulimit -n 2048 && mpirun --allow-run-as-root --hostfile $HOSTFILE - dcp --progress 1 --uid $UID --gid $GID $SRC $DEST - description: |- - Command to execute to perform data movement. $VARS are replaced by the nnf software and must - be present in the command. - Available $VARS: - HOSTFILE: hostfile that is created and used for mpirun. Contains a list of hosts and the - slots/max_slots for each host. This hostfile is created at `/tmp//hostfile` - UID: User ID that is inherited from the Workflow - GID: Group ID that is inherited from the Workflow - SRC: source for the data movement - DEST destination for the data movement - type: string - createDestDir: - default: true - description: |- - CreateDestDir will ensure that the destination directory exists before performing data - movement. This will cause a number of stat commands to determine the source and destination - file types, so that the correct pathing for the destination can be determined. Then, a mkdir - is issued. - type: boolean - default: - default: false - description: Default is true if this instance is the default resource - to use - type: boolean - logStdout: - default: false - description: |- - If true, enable the command's stdout to be saved in the log when the command completes - successfully. On failure, the output is always logged. - type: boolean - maxSlots: - default: 0 - description: |- - MaxSlots is the number of max_slots specified in the MPI hostfile. A value of 0 disables the - use of max_slots in the hostfile. The hostfile is used for both `statCommand` and `Command`. 
- minimum: 0 - type: integer - pinned: - default: false - description: Pinned is true if this instance is an immutable copy - type: boolean - progressIntervalSeconds: - default: 5 - description: |- - NnfDataMovement resources have the ability to collect and store the progress percentage and the - last few lines of output in the CommandStatus field. This number is used for the interval to collect - the progress data. `dcp --progress N` must be included in the data movement command in order for - progress to be collected. A value of 0 disables this functionality. - minimum: 0 - type: integer - slots: - default: 8 - description: |- - Slots is the number of slots specified in the MPI hostfile. A value of 0 disables the use of - slots in the hostfile. The hostfile is used for both `statCommand` and `Command`. - minimum: 0 - type: integer - statCommand: - default: mpirun --allow-run-as-root -np 1 --hostfile $HOSTFILE -- - setpriv --euid $UID --egid $GID --clear-groups stat --cached never - -c '%F' $PATH - description: |- - If CreateDestDir is true, then use StatCommand to perform the stat commands. - Use setpriv to stat the path with the specified UID/GID. - Available $VARS: - HOSTFILE: hostfile that is created and used for mpirun. Contains a list of hosts and the - slots/max_slots for each host. This hostfile is created at - `/tmp//hostfile`. This is the same hostfile used as the one for Command. - UID: User ID that is inherited from the Workflow - GID: Group ID that is inherited from the Workflow - PATH: Path to stat - type: string - storeStdout: - default: false - description: |- - Similar to logStdout, store the command's stdout in Status.Message when the command completes - successfully. On failure, the output is always stored. - type: boolean - required: - - command - - createDestDir - - maxSlots - - slots - - statCommand - type: object - kind: - description: |- - Kind is a string value representing the REST resource this object represents. 
- Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - type: object - served: false - storage: false - subresources: {} - additionalPrinterColumns: - description: True if this is the default instance jsonPath: .data.default diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfdatamovements.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfdatamovements.yaml index b4d185c6..8af1d70c 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfdatamovements.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfdatamovements.yaml @@ -14,412 +14,6 @@ spec: singular: nnfdatamovement scope: Namespaced versions: - - additionalPrinterColumns: - - description: Current state - jsonPath: .status.state - name: STATE - type: string - - description: Status of current state - jsonPath: .status.status - name: STATUS - type: string - - jsonPath: .status.error.severity - name: ERROR - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: NnfDataMovement is the Schema for the nnfdatamovements API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: NnfDataMovementSpec defines the desired state of NnfDataMovement - properties: - cancel: - default: false - description: Set to true if the data movement operation should be - canceled. - type: boolean - destination: - description: Destination describes the destination of the data movement - operation - properties: - path: - description: Path describes the location of the user data relative - to the storage instance - type: string - storageReference: - description: |- - Storage describes the storage backing this data movement specification; Storage can reference - either NNF storage or global Lustre storage depending on the object references Kind field. - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. - TODO: this design is not final and this field is subject to change in the future. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic - type: object - groupId: - description: |- - Group Id specifies the group ID for the data movement operation. This value is used - in conjunction with the user ID to ensure the user has valid permissions to perform - the data movement operation. - format: int32 - type: integer - profileReference: - description: |- - ProfileReference is an object reference to an NnfDataMovementProfile that is used to - configure data movement. If empty, the default profile is used. - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. - TODO: this design is not final and this field is subject to change in the future. 
- type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic - source: - description: Source describes the source of the data movement operation - properties: - path: - description: Path describes the location of the user data relative - to the storage instance - type: string - storageReference: - description: |- - Storage describes the storage backing this data movement specification; Storage can reference - either NNF storage or global Lustre storage depending on the object references Kind field. - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
- For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. - TODO: this design is not final and this field is subject to change in the future. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic - type: object - userConfig: - description: |- - User defined configuration on how data movement should be performed. This overrides the - configuration defined in the supplied ProfileReference/NnfDataMovementProfile. These values - are typically set by the Copy Offload API. - properties: - dcpOptions: - description: Extra options to pass to the dcp command (used to - perform data movement). - type: string - dryrun: - default: false - description: |- - Fake the Data Movement operation. 
The system "performs" Data Movement but the command to do so - is trivial. This means a Data Movement request is still submitted but the IO is skipped. - type: boolean - logStdout: - default: false - description: |- - If true, enable the command's stdout to be saved in the log when the command completes - successfully. On failure, the output is always logged. - Note: Enabling this option may degrade performance. - type: boolean - maxSlots: - description: |- - The number of max_slots specified in the MPI hostfile. A value of 0 disables the use of slots - in the hostfile. Nil will defer to the value specified in the NnfDataMovementProfile. - type: integer - mpirunOptions: - description: Extra options to pass to the mpirun command (used - to perform data movement). - type: string - slots: - description: |- - The number of slots specified in the MPI hostfile. A value of 0 disables the use of slots in - the hostfile. Nil will defer to the value specified in the NnfDataMovementProfile. - type: integer - storeStdout: - default: false - description: |- - Similar to LogStdout, store the command's stdout in Status.Message when the command completes - successfully. On failure, the output is always stored. - Note: Enabling this option may degrade performance. - type: boolean - type: object - userId: - description: |- - User Id specifies the user ID for the data movement operation. This value is used - in conjunction with the group ID to ensure the user has valid permissions to perform - the data movement operation. - format: int32 - type: integer - type: object - status: - description: NnfDataMovementStatus defines the observed state of NnfDataMovement - properties: - commandStatus: - description: |- - CommandStatus reflects the current status of the underlying Data Movement command - as it executes. The command status is polled at a certain frequency to avoid excessive - updates to the Data Movement resource. 
- properties: - command: - description: The command that was executed during data movement. - type: string - data: - description: |- - Data is parsed from the dcp output when the command is finished. This is the total amount of - data copied by dcp. - type: string - directories: - description: |- - Directories is parsed from the dcp output when the command is finished. This is the number of - directories that dcp copied. Note: This value may be inflated due to NNF index mount - directories when copying from XFS or GFS2 filesystems. - format: int32 - type: integer - elapsedTime: - description: ElapsedTime reflects the elapsed time since the underlying - data movement command started. - type: string - files: - description: |- - Files is parsed from the dcp output when the command is finished. This is the number of files - that dcp copied. - format: int32 - type: integer - items: - description: |- - Items is parsed from the dcp output when the command is finished. This is a total of - the number of directories, files, and links that dcp copied. - format: int32 - type: integer - lastMessage: - description: |- - LastMessage reflects the last message received over standard output or standard error as - captured by the underlying data movement command. - type: string - lastMessageTime: - description: |- - LastMessageTime reflects the time at which the last message was received over standard output - or standard error by the underlying data movement command. - format: date-time - type: string - links: - description: |- - Links is parsed from the dcp output when the command is finished. This is the number of links - that dcp copied. - format: int32 - type: integer - progress: - description: |- - ProgressPercentage refects the progress of the underlying data movement command as captured from - standard output. A best effort is made to parse the command output as a percentage. If no - progress has yet to be measured than this field is omitted. 
If the latest command output does - not contain a valid percentage, then the value is unchanged from the previously parsed value. - format: int32 - type: integer - rate: - description: |- - Rate is parsed from the dcp output when the command is finished. This is transfer rate of the - data copied by dcp. - type: string - seconds: - description: Seconds is parsed from the dcp output when the command - is finished. - type: string - type: object - endTime: - description: EndTime reflects the time at which the Data Movement - operation ended. - format: date-time - type: string - error: - description: Error information - properties: - debugMessage: - description: Internal debug message for the error - type: string - severity: - description: |- - Indication of how severe the error is. Minor will likely succeed, Major may - succeed, and Fatal will never succeed. - enum: - - Minor - - Major - - Fatal - type: string - type: - description: Internal or user error - enum: - - Internal - - User - - WLM - type: string - userMessage: - description: Optional user facing message if the error is relevant - to an end user - type: string - required: - - debugMessage - - severity - - type - type: object - message: - description: |- - Message contains any text that explains the Status. If Data Movement failed or storeStdout is - enabled, this will contain the command's output. - type: string - restarts: - description: Restarts contains the number of restarts of the Data - Movement operation. - type: integer - startTime: - description: StartTime reflects the time at which the Data Movement - operation started. - format: date-time - type: string - state: - description: Current state of data movement. - enum: - - Starting - - Running - - Finished - type: string - status: - description: Status of the current state. 
- enum: - - Success - - Failed - - Invalid - - Cancelled - type: string - type: object - type: object - served: false - storage: false - subresources: - status: {} - additionalPrinterColumns: - description: Current state jsonPath: .status.state diff --git a/config/crd/bases/nnf.cray.hpe.com_nnflustremgts.yaml b/config/crd/bases/nnf.cray.hpe.com_nnflustremgts.yaml index 98cc6710..cc1a0186 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnflustremgts.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnflustremgts.yaml @@ -14,277 +14,6 @@ spec: singular: nnflustremgt scope: Namespaced versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: NnfLustreMGT is the Schema for the nnfstorageprofiles API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: NnfLustreMGTSpec defines the desired state of NnfLustreMGT - properties: - addresses: - description: Addresses is the list of LNet addresses for the MGT - items: - type: string - type: array - claimList: - description: ClaimList is the list of currently in use fsnames - items: - description: |- - ObjectReference contains enough information to let you inspect or modify the referred object. - --- - New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. - 1. Ignored fields. 
It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. - 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular - restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". - Those cannot be well described when embedded. - 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. - 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity - during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple - and the version of the actual struct is irrelevant. - 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type - will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. - - - Instead of using this type, create a locally provided and used type that is well-focused on your reference. - For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). 
This syntax is chosen only to have some well-defined way of - referencing a part of an object. - TODO: this design is not final and this field is subject to change in the future. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic - type: array - fsNameBlackList: - description: |- - FsNameBlackList is a list of fsnames that can't be used. This may be - necessary if the MGT hosts file systems external to Rabbit - items: - type: string - type: array - fsNameStart: - description: FsNameStart is the starting fsname to be used - maxLength: 8 - minLength: 8 - type: string - fsNameStartReference: - description: |- - FsNameStartReference can be used to add a configmap where the starting fsname is - stored. If this reference is set, it takes precendence over FsNameStart. The configmap - will be updated with the next available fsname anytime an fsname is used. - properties: - apiVersion: - description: API version of the referent. 
- type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. - TODO: this design is not final and this field is subject to change in the future. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic - required: - - addresses - type: object - status: - description: NnfLustreMGTStatus defines the current state of NnfLustreMGT - properties: - claimList: - description: ClaimList is the list of currently in use fsnames - items: - properties: - fsname: - type: string - reference: - description: |- - ObjectReference contains enough information to let you inspect or modify the referred object. - --- - New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. - 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. - 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular - restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". - Those cannot be well described when embedded. - 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. - 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity - during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple - and the version of the actual struct is irrelevant. - 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type - will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. - - - Instead of using this type, create a locally provided and used type that is well-focused on your reference. 
- For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. - TODO: this design is not final and this field is subject to change in the future. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic - type: object - type: array - error: - description: Error information - properties: - debugMessage: - description: Internal debug message for the error - type: string - severity: - description: |- - Indication of how severe the error is. Minor will likely succeed, Major may - succeed, and Fatal will never succeed. - enum: - - Minor - - Major - - Fatal - type: string - type: - description: Internal or user error - enum: - - Internal - - User - - WLM - type: string - userMessage: - description: Optional user facing message if the error is relevant - to an end user - type: string - required: - - debugMessage - - severity - - type - type: object - fsNameNext: - description: FsNameNext is the next available fsname that hasn't been - used - maxLength: 8 - minLength: 8 - type: string - type: object - type: object - served: false - storage: false - subresources: - status: {} - name: v1alpha2 schema: openAPIV3Schema: diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfnodeblockstorages.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfnodeblockstorages.yaml index fa2c7460..2fc2e4f8 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfnodeblockstorages.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfnodeblockstorages.yaml @@ -14,169 +14,6 @@ spec: singular: nnfnodeblockstorage scope: Namespaced versions: - - additionalPrinterColumns: - - jsonPath: .status.ready - name: READY - type: string - - jsonPath: .status.error.severity - name: ERROR - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - NnfNodeBlockStorageSpec defines the desired storage attributes on a NNF Node. - Storage spec are created on request of the user and fullfilled by the NNF Node Controller. - properties: - allocations: - description: Allocations is the list of storage allocations to make - items: - properties: - access: - description: List of nodes where /dev devices should be created - items: - type: string - type: array - capacity: - description: Aggregate capacity of the block devices for each - allocation - format: int64 - type: integer - type: object - type: array - sharedAllocation: - description: SharedAllocation is used when a single NnfNodeBlockStorage - allocation is used by multiple NnfNodeStorage allocations - type: boolean - required: - - sharedAllocation - type: object - status: - properties: - allocations: - description: Allocations is the list of storage allocations that were - made - items: - properties: - accesses: - additionalProperties: - properties: - devicePaths: - description: /dev paths for each of the block devices - items: - type: string - type: array - storageGroupId: - description: Redfish ID for the storage group - type: string - type: object - description: Accesses is a map of node name to the access status - type: object - capacityAllocated: - description: |- - Total capacity allocated for the storage. 
This may differ from the requested storage - capacity as the system may round up to the requested capacity to satisify underlying - storage requirements (i.e. block size / stripe size). - format: int64 - type: integer - devices: - description: List of NVMe namespaces used by this allocation - items: - properties: - NQN: - description: NQN of the base NVMe device - type: string - capacityAllocated: - description: |- - Total capacity allocated for the storage. This may differ from the requested storage - capacity as the system may round up to the requested capacity to satisify underlying - storage requirements (i.e. block size / stripe size). - format: int64 - type: integer - namespaceId: - description: Id of the Namespace on the NVMe device (e.g., - "2") - type: string - required: - - NQN - - namespaceId - type: object - type: array - storagePoolId: - description: Redfish ID for the storage pool - type: string - type: object - type: array - error: - description: Error information - properties: - debugMessage: - description: Internal debug message for the error - type: string - severity: - description: |- - Indication of how severe the error is. Minor will likely succeed, Major may - succeed, and Fatal will never succeed. - enum: - - Minor - - Major - - Fatal - type: string - type: - description: Internal or user error - enum: - - Internal - - User - - WLM - type: string - userMessage: - description: Optional user facing message if the error is relevant - to an end user - type: string - required: - - debugMessage - - severity - - type - type: object - podStartTime: - description: |- - PodStartTime is the value of pod.status.containerStatuses[].state.running.startedAt from the pod that did - last successful full reconcile of the NnfNodeBlockStorage. This is used to tell whether the /dev paths - listed in the status section are from the current boot of the node. 
- format: date-time - type: string - ready: - type: boolean - required: - - ready - type: object - type: object - served: false - storage: false - subresources: - status: {} - additionalPrinterColumns: - jsonPath: .status.ready name: READY diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfnodeecdata.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfnodeecdata.yaml index 28410fe3..ae7eecc8 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfnodeecdata.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfnodeecdata.yaml @@ -14,46 +14,6 @@ spec: singular: nnfnodeecdata scope: Namespaced versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: NnfNodeECData is the Schema for the nnfnodeecdata API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: NnfNodeECDataSpec defines the desired state of NnfNodeECData - type: object - status: - description: NnfNodeECDataStatus defines the observed state of NnfNodeECData - properties: - data: - additionalProperties: - additionalProperties: - type: string - type: object - type: object - type: object - type: object - served: false - storage: false - subresources: - status: {} - name: v1alpha2 schema: openAPIV3Schema: diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfnodes.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfnodes.yaml index c174c113..becc5225 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfnodes.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfnodes.yaml @@ -14,166 +14,6 @@ spec: singular: nnfnode scope: Namespaced versions: - - additionalPrinterColumns: - - description: Current desired state - jsonPath: .spec.state - name: STATE - type: string - - description: Health of node - jsonPath: .status.health - name: HEALTH - type: string - - description: Current status of node - jsonPath: .status.status - name: STATUS - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - - description: Parent pod name - jsonPath: .spec.pod - name: POD - priority: 1 - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: NnfNode is the Schema for the NnfNode API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. 
- Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: NnfNodeSpec defines the desired state of NNF Node - properties: - name: - description: The unique name for this NNF Node - type: string - pod: - description: Pod name for this NNF Node - type: string - state: - description: State reflects the desired state of this NNF Node resource - enum: - - Enable - - Disable - type: string - required: - - state - type: object - status: - description: NnfNodeStatus defines the observed status of NNF Node - properties: - capacity: - format: int64 - type: integer - capacityAllocated: - format: int64 - type: integer - drives: - items: - description: NnfDriveStatus defines the observe status of drives - connected to this NNF Node - properties: - capacity: - description: |- - Capacity in bytes of the device. The full capacity may not - be usable depending on what the storage driver can provide. - format: int64 - type: integer - firmwareVersion: - description: The firmware version of this storage controller. - type: string - health: - description: NnfResourceHealthType defines the health of an - NNF resource. - type: string - id: - description: ID reflects the NNF Node unique identifier for - this NNF Server resource. - type: string - model: - description: Model is the manufacturer information about the - device - type: string - name: - description: Name reflects the common name of this NNF Server - resource. - type: string - serialNumber: - description: The serial number for this storage controller. - type: string - slot: - description: Physical slot location of the storage controller. 
- type: string - status: - description: NnfResourceStatusType is the string that indicates - the resource's status - type: string - wearLevel: - description: WearLevel in percent for SSDs - format: int64 - type: integer - type: object - type: array - fenced: - description: Fenced is true when the NNF Node is fenced by the STONITH - agent, and false otherwise. - type: boolean - health: - description: NnfResourceHealthType defines the health of an NNF resource. - type: string - lnetNid: - description: LNetNid is the LNet address for the NNF node - type: string - servers: - items: - description: NnfServerStatus defines the observed status of servers - connected to this NNF Node - properties: - health: - description: NnfResourceHealthType defines the health of an - NNF resource. - type: string - hostname: - type: string - id: - description: ID reflects the NNF Node unique identifier for - this NNF Server resource. - type: string - name: - description: Name reflects the common name of this NNF Server - resource. 
- type: string - status: - description: NnfResourceStatusType is the string that indicates - the resource's status - type: string - type: object - type: array - status: - description: Status reflects the current status of the NNF Node - type: string - type: object - type: object - served: false - storage: false - subresources: - status: {} - additionalPrinterColumns: - description: Current desired state jsonPath: .spec.state diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfnodestorages.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfnodestorages.yaml index 9cb42005..2b0ddcb1 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfnodestorages.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfnodestorages.yaml @@ -14,225 +14,6 @@ spec: singular: nnfnodestorage scope: Namespaced versions: - - additionalPrinterColumns: - - jsonPath: .status.ready - name: READY - type: string - - jsonPath: .status.error.severity - name: ERROR - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: NnfNodeStorage is the Schema for the NnfNodeStorage API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - NnfNodeStorageSpec defines the desired storage attributes on a NNF Node. 
- Storage spec are created on bequest of the user and fullfilled by the NNF Node Controller. - properties: - blockReference: - description: BlockReference is an object reference to an NnfNodeBlockStorage - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. - TODO: this design is not final and this field is subject to change in the future. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic - capacity: - description: Capacity of an individual allocation - format: int64 - type: integer - count: - description: |- - Count is the number of allocations to make on this node. All of the allocations will - be created with the same parameters - minimum: 0 - type: integer - fileSystemType: - default: raw - description: |- - FileSystemType defines the type of the desired filesystem, or raw - block device. - enum: - - raw - - lvm - - zfs - - xfs - - gfs2 - - lustre - type: string - groupID: - description: Group ID for file system - format: int32 - type: integer - lustreStorage: - description: |- - LustreStorageSpec describes the Lustre target created here, if - FileSystemType specifies a Lustre target. - properties: - backFs: - description: BackFs is the type of backing filesystem to use. - enum: - - ldiskfs - - zfs - type: string - fileSystemName: - description: FileSystemName is the fsname parameter for the Lustre - filesystem. - maxLength: 8 - type: string - mgsAddress: - description: |- - MgsAddress is the NID of the MGS to use. This is used only when - creating MDT and OST targets. - type: string - startIndex: - description: |- - StartIndex is used to order a series of MDTs or OSTs. This is used only - when creating MDT and OST targets. If count in the NnfNodeStorageSpec is more - than 1, then StartIndex is the index of the first allocation, and the indexes - increment from there. - minimum: 0 - type: integer - targetType: - description: TargetType is the type of Lustre target to be created. 
- enum: - - mgt - - mdt - - mgtmdt - - ost - type: string - type: object - sharedAllocation: - description: SharedAllocation is used when a single NnfNodeBlockStorage - allocation is used by multiple NnfNodeStorage allocations - type: boolean - userID: - description: User ID for file system - format: int32 - type: integer - required: - - count - - groupID - - sharedAllocation - - userID - type: object - status: - description: NnfNodeStorageStatus defines the status for NnfNodeStorage - properties: - allocations: - description: Allocations is the list of storage allocations that were - made - items: - description: NnfNodeStorageAllocationStatus defines the allocation - status for each allocation in the NnfNodeStorage - properties: - logicalVolume: - description: Name of the LVM LV - type: string - ready: - type: boolean - volumeGroup: - description: Name of the LVM VG - type: string - type: object - type: array - error: - description: Error information - properties: - debugMessage: - description: Internal debug message for the error - type: string - severity: - description: |- - Indication of how severe the error is. Minor will likely succeed, Major may - succeed, and Fatal will never succeed. 
- enum: - - Minor - - Major - - Fatal - type: string - type: - description: Internal or user error - enum: - - Internal - - User - - WLM - type: string - userMessage: - description: Optional user facing message if the error is relevant - to an end user - type: string - required: - - debugMessage - - severity - - type - type: object - ready: - type: boolean - type: object - type: object - served: false - storage: false - subresources: - status: {} - additionalPrinterColumns: - jsonPath: .status.ready name: READY diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfportmanagers.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfportmanagers.yaml index 6db7514d..7fed2960 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfportmanagers.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfportmanagers.yaml @@ -14,243 +14,6 @@ spec: singular: nnfportmanager scope: Namespaced versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: NnfPortManager is the Schema for the nnfportmanagers API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: NnfPortManagerSpec defines the desired state of NnfPortManager - properties: - allocations: - description: |- - Allocations is a list of allocation requests that the Port Manager will attempt - to satisfy. 
To request port resources from the port manager, clients should add - an entry to the allocations. Entries must be unique. The port manager controller - will attempt to allocate port resources for each allocation specification in the - list. To remove an allocation and free up port resources, remove the allocation - from the list. - items: - description: NnfPortManagerAllocationSpec defines the desired state - for a single port allocation - properties: - count: - default: 1 - description: |- - Count is the number of desired ports the requester needs. The port manager - will attempt to allocate this many ports. - type: integer - requester: - description: Requester is an object reference to the requester - of a ports. - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. - TODO: this design is not final and this field is subject to change in the future. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic - required: - - count - - requester - type: object - type: array - systemConfiguration: - description: |- - SystemConfiguration is an object reference to the system configuration. The - Port Manager will use the available ports defined in the system configuration. - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. - TODO: this design is not final and this field is subject to change in the future. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic - required: - - allocations - - systemConfiguration - type: object - status: - description: NnfPortManagerStatus defines the observed state of NnfPortManager - properties: - allocations: - description: Allocations is a list of port allocation status'. - items: - description: NnfPortManagerAllocationStatus defines the allocation - status of a port for a given requester. - properties: - ports: - description: Ports is list of ports allocated to the owning - resource. - items: - type: integer - type: array - requester: - description: |- - Requester is an object reference to the requester of the port resource, if one exists, or - empty otherwise. - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). 
This syntax is chosen only to have some well-defined way of - referencing a part of an object. - TODO: this design is not final and this field is subject to change in the future. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic - status: - description: Status is the ownership status of the port. - enum: - - InUse - - Free - - Cooldown - - InvalidConfiguration - - InsufficientResources - type: string - timeUnallocated: - description: |- - TimeUnallocated is when the port was unallocated. This is to ensure the proper cooldown - duration. - format: date-time - type: string - required: - - status - type: object - type: array - status: - description: Status is the current status of the port manager. 
- enum: - - Ready - - SystemConfigurationNotFound - type: string - required: - - status - type: object - type: object - served: false - storage: false - subresources: - status: {} - name: v1alpha2 schema: openAPIV3Schema: diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfstorageprofiles.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfstorageprofiles.yaml index adfd59a4..a7feb109 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfstorageprofiles.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfstorageprofiles.yaml @@ -14,587 +14,6 @@ spec: singular: nnfstorageprofile scope: Namespaced versions: - - additionalPrinterColumns: - - description: True if this is the default instance - jsonPath: .data.default - name: DEFAULT - type: boolean - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: NnfStorageProfile is the Schema for the nnfstorageprofiles API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - data: - description: NnfStorageProfileData defines the desired state of NnfStorageProfile - properties: - default: - default: false - description: Default is true if this instance is the default resource - to use - type: boolean - gfs2Storage: - description: GFS2Storage defines the GFS2-specific configuration - properties: - capacityScalingFactor: - default: "1.0" - description: CapacityScalingFactor is a scaling factor for the - capacity requested in the DirectiveBreakdown - type: string - commandlines: - description: CmdLines contains commands to create volumes and - filesystems. 
- properties: - lvChange: - description: LvChange specifies the various lvchange commandlines, - minus the "lvchange" - properties: - activate: - description: The lvchange commandline for activate, minus - the "lvchange" command - type: string - deactivate: - description: The lvchange commandline for deactivate, - minus the "lvchange" command - type: string - type: object - lvCreate: - description: LvCreate specifies the lvcreate commandline, - minus the "lvcreate". - type: string - lvRemove: - description: LvRemove specifies the lvcreate commandline, - minus the "lvremove". - type: string - mkfs: - description: Mkfs specifies the mkfs commandline, minus the - "mkfs". - type: string - mountCompute: - description: MountCompute specifies mount options for mounting - on the Compute. - type: string - mountRabbit: - description: MountRabbit specifies mount options for mounting - on the Rabbit. - type: string - pvCreate: - description: PvCreate specifies the pvcreate commandline, - minus the "pvcreate". - type: string - pvRemove: - description: PvRemove specifies the pvremove commandline, - minus the "pvremove". - type: string - sharedVg: - default: false - description: |- - SharedVg specifies that allocations from a workflow on the same Rabbit should share an - LVM VolumeGroup - type: boolean - vgChange: - description: VgChange specifies the various vgchange commandlines, - minus the "vgchange" - properties: - lockStart: - description: The vgchange commandline for lockStart, minus - the "vgchange" command - type: string - lockStop: - description: The vgchange commandline for lockStop, minus - the "vgchange" command - type: string - type: object - vgCreate: - description: VgCreate specifies the vgcreate commandline, - minus the "vgcreate". - type: string - vgRemove: - description: VgCreate specifies the vgcreate commandline, - minus the "vgremove". 
- type: string - type: object - storageLabels: - description: |- - Storagelabels defines a list of labels that are added to the DirectiveBreakdown - labels constraint. This restricts allocations to Storage resources with these labels - items: - type: string - type: array - type: object - lustreStorage: - description: LustreStorage defines the Lustre-specific configuration - properties: - capacityMdt: - default: 5GiB - description: |- - CapacityMDT specifies the size of the MDT device. This is also - used for a combined MGT+MDT device. - pattern: ^\d+(KiB|KB|MiB|MB|GiB|GB|TiB|TB)$ - type: string - capacityMgt: - default: 5GiB - description: CapacityMGT specifies the size of the MGT device. - pattern: ^\d+(KiB|KB|MiB|MB|GiB|GB|TiB|TB)$ - type: string - capacityScalingFactor: - default: "1.0" - description: CapacityScalingFactor is a scaling factor for the - OST capacity requested in the DirectiveBreakdown - type: string - combinedMgtMdt: - default: false - description: CombinedMGTMDT indicates whether the MGT and MDT - should be created on the same target device - type: boolean - exclusiveMdt: - default: false - description: ExclusiveMDT indicates that the MDT should not be - colocated with any other target on the chosen server. - type: boolean - externalMgs: - description: |- - ExternalMGS specifies the use of an existing MGS rather than creating one. This can - be either the NID(s) of a pre-existing MGS that should be used, or it can be an NNF Persistent - Instance that was created with the "StandaloneMGTPoolName" option. In the latter case, the format - is "pool:poolName" where "poolName" is the argument from "StandaloneMGTPoolName". A single MGS will - be picked from the pool. - type: string - mdtCommandlines: - description: MdtCmdLines contains commands to create an MDT target. - properties: - mkfs: - description: |- - Mkfs specifies the mkfs.lustre commandline, minus the "mkfs.lustre". - Use the --mkfsoptions argument to specify the zfs create options. 
See zfsprops(7). - Use the --mountfsoptions argument to specify persistent mount options for the lustre targets. - type: string - mountTarget: - description: |- - MountTarget specifies the mount command line for the lustre target. - For persistent mount options for lustre targets, do not use this array; use the --mountfsoptions - argument to mkfs.lustre instead. - type: string - zpoolCreate: - description: |- - ZpoolCreate specifies the zpool create commandline, minus the "zpool create". - This is where you may specify zpool create options, and the virtual device (vdev) such as - "mirror", or "draid". See zpoolconcepts(7). - type: string - type: object - mdtOptions: - description: MdtOptions contains options to use for libraries - used for an MDT target. - properties: - colocateComputes: - default: false - description: |- - ColocateComputes indicates that the Lustre target should be placed on a Rabbit node that has a physical connection - to the compute nodes in a workflow - type: boolean - count: - description: Count specifies how many Lustre targets to create - minimum: 1 - type: integer - scale: - description: Scale provides a unitless value to determine - how many Lustre targets to create - maximum: 10 - minimum: 1 - type: integer - storageLabels: - description: |- - Storagelabels defines a list of labels that are added to the DirectiveBreakdown - labels constraint. This restricts allocations to Storage resources with these labels - items: - type: string - type: array - required: - - colocateComputes - type: object - mgtCommandlines: - description: MgtCmdLines contains commands to create an MGT target. - properties: - mkfs: - description: |- - Mkfs specifies the mkfs.lustre commandline, minus the "mkfs.lustre". - Use the --mkfsoptions argument to specify the zfs create options. See zfsprops(7). - Use the --mountfsoptions argument to specify persistent mount options for the lustre targets. 
- type: string - mountTarget: - description: |- - MountTarget specifies the mount command line for the lustre target. - For persistent mount options for lustre targets, do not use this array; use the --mountfsoptions - argument to mkfs.lustre instead. - type: string - zpoolCreate: - description: |- - ZpoolCreate specifies the zpool create commandline, minus the "zpool create". - This is where you may specify zpool create options, and the virtual device (vdev) such as - "mirror", or "draid". See zpoolconcepts(7). - type: string - type: object - mgtMdtCommandlines: - description: MgtMdtCmdLines contains commands to create a combined - MGT/MDT target. - properties: - mkfs: - description: |- - Mkfs specifies the mkfs.lustre commandline, minus the "mkfs.lustre". - Use the --mkfsoptions argument to specify the zfs create options. See zfsprops(7). - Use the --mountfsoptions argument to specify persistent mount options for the lustre targets. - type: string - mountTarget: - description: |- - MountTarget specifies the mount command line for the lustre target. - For persistent mount options for lustre targets, do not use this array; use the --mountfsoptions - argument to mkfs.lustre instead. - type: string - zpoolCreate: - description: |- - ZpoolCreate specifies the zpool create commandline, minus the "zpool create". - This is where you may specify zpool create options, and the virtual device (vdev) such as - "mirror", or "draid". See zpoolconcepts(7). - type: string - type: object - mgtMdtOptions: - description: MgtMdtOptions contains options to use for libraries - used for a combined MGT/MDT target. 
- properties: - colocateComputes: - default: false - description: |- - ColocateComputes indicates that the Lustre target should be placed on a Rabbit node that has a physical connection - to the compute nodes in a workflow - type: boolean - count: - description: Count specifies how many Lustre targets to create - minimum: 1 - type: integer - scale: - description: Scale provides a unitless value to determine - how many Lustre targets to create - maximum: 10 - minimum: 1 - type: integer - storageLabels: - description: |- - Storagelabels defines a list of labels that are added to the DirectiveBreakdown - labels constraint. This restricts allocations to Storage resources with these labels - items: - type: string - type: array - required: - - colocateComputes - type: object - mgtOptions: - description: MgtOptions contains options to use for libraries - used for an MGT target. - properties: - colocateComputes: - default: false - description: |- - ColocateComputes indicates that the Lustre target should be placed on a Rabbit node that has a physical connection - to the compute nodes in a workflow - type: boolean - count: - description: Count specifies how many Lustre targets to create - minimum: 1 - type: integer - scale: - description: Scale provides a unitless value to determine - how many Lustre targets to create - maximum: 10 - minimum: 1 - type: integer - storageLabels: - description: |- - Storagelabels defines a list of labels that are added to the DirectiveBreakdown - labels constraint. This restricts allocations to Storage resources with these labels - items: - type: string - type: array - required: - - colocateComputes - type: object - mountCompute: - description: MountCompute specifies mount options for making the - Lustre client mount on the Compute. - type: string - mountRabbit: - description: MountRabbit specifies mount options for making the - Lustre client mount on the Rabbit. 
- type: string - ostCommandlines: - description: OstCmdLines contains commands to create an OST target. - properties: - mkfs: - description: |- - Mkfs specifies the mkfs.lustre commandline, minus the "mkfs.lustre". - Use the --mkfsoptions argument to specify the zfs create options. See zfsprops(7). - Use the --mountfsoptions argument to specify persistent mount options for the lustre targets. - type: string - mountTarget: - description: |- - MountTarget specifies the mount command line for the lustre target. - For persistent mount options for lustre targets, do not use this array; use the --mountfsoptions - argument to mkfs.lustre instead. - type: string - zpoolCreate: - description: |- - ZpoolCreate specifies the zpool create commandline, minus the "zpool create". - This is where you may specify zpool create options, and the virtual device (vdev) such as - "mirror", or "draid". See zpoolconcepts(7). - type: string - type: object - ostOptions: - description: OstOptions contains options to use for libraries - used for an OST target. - properties: - colocateComputes: - default: false - description: |- - ColocateComputes indicates that the Lustre target should be placed on a Rabbit node that has a physical connection - to the compute nodes in a workflow - type: boolean - count: - description: Count specifies how many Lustre targets to create - minimum: 1 - type: integer - scale: - description: Scale provides a unitless value to determine - how many Lustre targets to create - maximum: 10 - minimum: 1 - type: integer - storageLabels: - description: |- - Storagelabels defines a list of labels that are added to the DirectiveBreakdown - labels constraint. This restricts allocations to Storage resources with these labels - items: - type: string - type: array - required: - - colocateComputes - type: object - standaloneMgtPoolName: - description: |- - StandaloneMGTPoolName creates a Lustre MGT without a MDT or OST. 
This option can only be used when creating - a persistent Lustre instance. The MGS is placed into a named pool that can be used by the "ExternalMGS" option. - Multiple pools can be created. - type: string - type: object - pinned: - default: false - description: Pinned is true if this instance is an immutable copy - type: boolean - rawStorage: - description: RawStorage defines the Raw-specific configuration - properties: - capacityScalingFactor: - default: "1.0" - description: CapacityScalingFactor is a scaling factor for the - capacity requested in the DirectiveBreakdown - type: string - commandlines: - description: CmdLines contains commands to create volumes and - filesystems. - properties: - lvChange: - description: LvChange specifies the various lvchange commandlines, - minus the "lvchange" - properties: - activate: - description: The lvchange commandline for activate, minus - the "lvchange" command - type: string - deactivate: - description: The lvchange commandline for deactivate, - minus the "lvchange" command - type: string - type: object - lvCreate: - description: LvCreate specifies the lvcreate commandline, - minus the "lvcreate". - type: string - lvRemove: - description: LvRemove specifies the lvcreate commandline, - minus the "lvremove". - type: string - mkfs: - description: Mkfs specifies the mkfs commandline, minus the - "mkfs". - type: string - mountCompute: - description: MountCompute specifies mount options for mounting - on the Compute. - type: string - mountRabbit: - description: MountRabbit specifies mount options for mounting - on the Rabbit. - type: string - pvCreate: - description: PvCreate specifies the pvcreate commandline, - minus the "pvcreate". - type: string - pvRemove: - description: PvRemove specifies the pvremove commandline, - minus the "pvremove". 
- type: string - sharedVg: - default: false - description: |- - SharedVg specifies that allocations from a workflow on the same Rabbit should share an - LVM VolumeGroup - type: boolean - vgChange: - description: VgChange specifies the various vgchange commandlines, - minus the "vgchange" - properties: - lockStart: - description: The vgchange commandline for lockStart, minus - the "vgchange" command - type: string - lockStop: - description: The vgchange commandline for lockStop, minus - the "vgchange" command - type: string - type: object - vgCreate: - description: VgCreate specifies the vgcreate commandline, - minus the "vgcreate". - type: string - vgRemove: - description: VgCreate specifies the vgcreate commandline, - minus the "vgremove". - type: string - type: object - storageLabels: - description: |- - Storagelabels defines a list of labels that are added to the DirectiveBreakdown - labels constraint. This restricts allocations to Storage resources with these labels - items: - type: string - type: array - type: object - xfsStorage: - description: XFSStorage defines the XFS-specific configuration - properties: - capacityScalingFactor: - default: "1.0" - description: CapacityScalingFactor is a scaling factor for the - capacity requested in the DirectiveBreakdown - type: string - commandlines: - description: CmdLines contains commands to create volumes and - filesystems. - properties: - lvChange: - description: LvChange specifies the various lvchange commandlines, - minus the "lvchange" - properties: - activate: - description: The lvchange commandline for activate, minus - the "lvchange" command - type: string - deactivate: - description: The lvchange commandline for deactivate, - minus the "lvchange" command - type: string - type: object - lvCreate: - description: LvCreate specifies the lvcreate commandline, - minus the "lvcreate". - type: string - lvRemove: - description: LvRemove specifies the lvcreate commandline, - minus the "lvremove". 
- type: string - mkfs: - description: Mkfs specifies the mkfs commandline, minus the - "mkfs". - type: string - mountCompute: - description: MountCompute specifies mount options for mounting - on the Compute. - type: string - mountRabbit: - description: MountRabbit specifies mount options for mounting - on the Rabbit. - type: string - pvCreate: - description: PvCreate specifies the pvcreate commandline, - minus the "pvcreate". - type: string - pvRemove: - description: PvRemove specifies the pvremove commandline, - minus the "pvremove". - type: string - sharedVg: - default: false - description: |- - SharedVg specifies that allocations from a workflow on the same Rabbit should share an - LVM VolumeGroup - type: boolean - vgChange: - description: VgChange specifies the various vgchange commandlines, - minus the "vgchange" - properties: - lockStart: - description: The vgchange commandline for lockStart, minus - the "vgchange" command - type: string - lockStop: - description: The vgchange commandline for lockStop, minus - the "vgchange" command - type: string - type: object - vgCreate: - description: VgCreate specifies the vgcreate commandline, - minus the "vgcreate". - type: string - vgRemove: - description: VgCreate specifies the vgcreate commandline, - minus the "vgremove". - type: string - type: object - storageLabels: - description: |- - Storagelabels defines a list of labels that are added to the DirectiveBreakdown - labels constraint. This restricts allocations to Storage resources with these labels - items: - type: string - type: array - type: object - required: - - gfs2Storage - - lustreStorage - - rawStorage - - xfsStorage - type: object - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - type: object - served: false - storage: false - subresources: {} - additionalPrinterColumns: - description: True if this is the default instance jsonPath: .data.default diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfstorages.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfstorages.yaml index 767a9f4c..3bad19ad 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfstorages.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfstorages.yaml @@ -14,301 +14,6 @@ spec: singular: nnfstorage scope: Namespaced versions: - - additionalPrinterColumns: - - jsonPath: .status.ready - name: READY - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - - jsonPath: .status.error.severity - name: ERROR - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: NnfStorage is the Schema for the storages API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - NnfStorageSpec defines the specification for requesting generic storage on a set - of available NNF Nodes. This object is related to a #DW for NNF Storage, with the WLM - making the determination for which NNF Nodes it wants to utilize. 
- properties: - allocationSets: - description: |- - AllocationSets is a list of different types of storage allocations to make. Each - AllocationSet describes an entire allocation spanning multiple Rabbits. For example, - an AllocationSet could be all of the OSTs in a Lustre filesystem, or all of the raw - block devices in a raw block configuration. - items: - description: NnfStorageAllocationSetSpec defines the details for - an allocation set - properties: - backFs: - description: BackFs is the type of backing filesystem to use. - enum: - - ldiskfs - - zfs - type: string - capacity: - description: |- - Capacity defines the capacity, in bytes, of this storage specification. The NNF Node itself - may split the storage among the available drives operating in the NNF Node. - format: int64 - type: integer - mgsAddress: - description: |- - MgsAddress is the NID of the MGS when a pre-existing MGS is - provided in the NnfStorageProfile - type: string - name: - description: Name is a human readable label for this set of - allocations (e.g., xfs) - type: string - nodes: - description: Nodes is the list of Rabbit nodes to make allocations - on - items: - description: NnfStorageAllocationNodes identifies the node - and properties of the allocation to make on that node - properties: - count: - description: Number of allocations to make on this node - type: integer - name: - description: Name of the node to make the allocation on - type: string - required: - - count - - name - type: object - type: array - persistentMgsReference: - description: |- - PersistentMgsReference is a reference to a persistent storage that is providing - the external MGS. - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
- For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. - TODO: this design is not final and this field is subject to change in the future. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic - sharedAllocation: - description: |- - SharedAllocation shares a single block storage allocation between multiple file system allocations - (within the same workflow) on a Rabbit - type: boolean - targetType: - description: TargetType is the type of Lustre target to be created. 
- enum: - - mgt - - mdt - - mgtmdt - - ost - type: string - required: - - capacity - - name - - nodes - - sharedAllocation - type: object - type: array - fileSystemType: - default: raw - description: |- - FileSystemType defines the type of the desired filesystem, or raw - block device. - enum: - - raw - - lvm - - zfs - - xfs - - gfs2 - - lustre - type: string - groupID: - description: Group ID for file system - format: int32 - type: integer - userID: - description: User ID for file system - format: int32 - type: integer - required: - - allocationSets - - groupID - - userID - type: object - status: - description: NnfStorageStatus defines the observed status of NNF Storage. - properties: - allocationSets: - description: |- - AllocationsSets holds the status information for each of the AllocationSets - from the spec. - items: - description: NnfStorageAllocationSetStatus contains the status information - for an allocation set - properties: - allocationCount: - description: |- - AllocationCount is the total number of allocations that currently - exist - type: integer - ready: - type: boolean - required: - - allocationCount - type: object - type: array - error: - description: Error information - properties: - debugMessage: - description: Internal debug message for the error - type: string - severity: - description: |- - Indication of how severe the error is. Minor will likely succeed, Major may - succeed, and Fatal will never succeed. - enum: - - Minor - - Major - - Fatal - type: string - type: - description: Internal or user error - enum: - - Internal - - User - - WLM - type: string - userMessage: - description: Optional user facing message if the error is relevant - to an end user - type: string - required: - - debugMessage - - severity - - type - type: object - fileSystemName: - description: FileSystemName is the fsname parameter for the Lustre - filesystem. 
- maxLength: 8 - type: string - lustreMgtReference: - description: |- - LustgreMgtReference is an object reference to the NnfLustreMGT resource used - by the NnfStorage - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. - TODO: this design is not final and this field is subject to change in the future. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic - mgsAddress: - description: MgsAddress is the NID of the MGS. 
- type: string - ready: - description: Ready reflects the status of this NNF Storage - type: boolean - type: object - type: object - served: false - storage: false - subresources: - status: {} - additionalPrinterColumns: - jsonPath: .status.ready name: READY diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfsystemstorages.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfsystemstorages.yaml index 4dc25e43..fec341f2 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfsystemstorages.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfsystemstorages.yaml @@ -14,240 +14,6 @@ spec: singular: nnfsystemstorage scope: Namespaced versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: NnfSystemStorage is the Schema for the nnfsystemstorages API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: NnfSystemStorageSpec defines the desired state of NnfSystemStorage - properties: - capacity: - default: 1073741824 - description: Capacity is the allocation size on each Rabbit - format: int64 - type: integer - clientMountPath: - description: ClientMountPath is an optional path for where to mount - the file system on the computes - type: string - computesPattern: - description: |- - ComputesPattern is a list of compute node indexes (0-15) to make the storage accessible to. 
This - is only used if ComputesTarget is "pattern" - items: - type: integer - maxItems: 16 - type: array - computesTarget: - default: all - description: ComputesTarget specifies which computes to make the storage - accessible to - enum: - - all - - even - - odd - - pattern - type: string - excludeComputes: - description: |- - ExcludeComputes is a list of compute nodes to exclude from the the compute nodes listed in the - SystemConfiguration - items: - type: string - type: array - excludeRabbits: - description: ExludeRabbits is a list of Rabbits to exclude from the - Rabbits in the SystemConfiguration - items: - type: string - type: array - includeComputes: - description: |- - IncludeComputes is a list of computes nodes to use rather than getting the list of compute nodes - from the SystemConfiguration - items: - type: string - type: array - includeRabbits: - description: |- - IncludeRabbits is a list of Rabbits to use rather than getting the list of Rabbits from the - SystemConfiguration - items: - type: string - type: array - makeClientMounts: - default: false - description: |- - MakeClientMounts specifies whether to make ClientMount resources or just - make the devices available to the client - type: boolean - storageProfile: - description: StorageProfile is an object reference to the storage - profile to use - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). 
This syntax is chosen only to have some well-defined way of - referencing a part of an object. - TODO: this design is not final and this field is subject to change in the future. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic - systemConfiguration: - description: |- - SystemConfiguration is an object reference to the SystemConfiguration resource to use. If this - field is empty, name: default namespace: default is used. - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). 
This syntax is chosen only to have some well-defined way of - referencing a part of an object. - TODO: this design is not final and this field is subject to change in the future. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic - type: - default: raw - description: Type is the file system type to use for the storage allocation - enum: - - raw - - xfs - - gfs2 - type: string - required: - - capacity - - makeClientMounts - - storageProfile - type: object - status: - description: NnfSystemStorageStatus defines the observed state of NnfSystemStorage - properties: - error: - description: Error information - properties: - debugMessage: - description: Internal debug message for the error - type: string - severity: - description: |- - Indication of how severe the error is. Minor will likely succeed, Major may - succeed, and Fatal will never succeed. 
- enum: - - Minor - - Major - - Fatal - type: string - type: - description: Internal or user error - enum: - - Internal - - User - - WLM - type: string - userMessage: - description: Optional user facing message if the error is relevant - to an end user - type: string - required: - - debugMessage - - severity - - type - type: object - ready: - description: Ready signifies whether all work has been completed - type: boolean - required: - - ready - type: object - type: object - served: false - storage: false - subresources: - status: {} - name: v1alpha2 schema: openAPIV3Schema: diff --git a/internal/controller/conversion_test.go b/internal/controller/conversion_test.go index 2fd1796d..a72b9800 100644 --- a/internal/controller/conversion_test.go +++ b/internal/controller/conversion_test.go @@ -30,7 +30,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" @@ -79,12 +78,6 @@ var _ = Describe("Conversion Webhook Test", func() { } }) - It("is unable to read NnfAccess resource via spoke v1alpha1", func() { - // Spoke should have annotation. - resSpoke := &nnfv1alpha1.NnfAccess{} - Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).ToNot(Succeed()) - }) - It("reads NnfAccess resource via hub and via spoke v1alpha2", func() { // Spoke should have annotation. resSpoke := &nnfv1alpha2.NnfAccess{} @@ -155,12 +148,6 @@ var _ = Describe("Conversion Webhook Test", func() { } }) - It("is unable to read NnfContainerProfile resource via spoke v1alpha1", func() { - // Spoke should have annotation. 
- resSpoke := &nnfv1alpha1.NnfContainerProfile{} - Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).ToNot(Succeed()) - }) - It("reads NnfContainerProfile resource via hub and via spoke v1alpha2", func() { // Spoke should have annotation. resSpoke := &nnfv1alpha2.NnfContainerProfile{} @@ -226,12 +213,6 @@ var _ = Describe("Conversion Webhook Test", func() { } }) - It("is unable to read NnfDataMovement resource via spoke v1alpha1", func() { - // Spoke should have annotation. - resSpoke := &nnfv1alpha1.NnfDataMovement{} - Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).ToNot(Succeed()) - }) - It("reads NnfDataMovement resource via hub and via spoke v1alpha2", func() { // Spoke should have annotation. resSpoke := &nnfv1alpha2.NnfDataMovement{} @@ -306,12 +287,6 @@ var _ = Describe("Conversion Webhook Test", func() { } }) - It("is unable to read NnfDataMovementManager resource via spoke v1alpha1", func() { - // Spoke should have annotation. - resSpoke := &nnfv1alpha1.NnfDataMovementManager{} - Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).ToNot(Succeed()) - }) - It("reads NnfDataMovementManager resource via hub and via spoke v1alpha2", func() { // Spoke should have annotation. resSpoke := &nnfv1alpha2.NnfDataMovementManager{} @@ -377,12 +352,6 @@ var _ = Describe("Conversion Webhook Test", func() { } }) - It("is unable to read NnfDataMovementProfile resource via spoke v1alpha1", func() { - // Spoke should have annotation. - resSpoke := &nnfv1alpha1.NnfDataMovementProfile{} - Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).ToNot(Succeed()) - }) - It("reads NnfDataMovementProfile resource via hub and via spoke v1alpha2", func() { // Spoke should have annotation. 
resSpoke := &nnfv1alpha2.NnfDataMovementProfile{} @@ -450,12 +419,6 @@ var _ = Describe("Conversion Webhook Test", func() { } }) - It("is unable to read NnfLustreMGT resource via spoke v1alpha1", func() { - // Spoke should have annotation. - resSpoke := &nnfv1alpha1.NnfLustreMGT{} - Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).ToNot(Succeed()) - }) - It("reads NnfLustreMGT resource via hub and via spoke v1alpha2", func() { // Spoke should have annotation. resSpoke := &nnfv1alpha2.NnfLustreMGT{} @@ -523,12 +486,6 @@ var _ = Describe("Conversion Webhook Test", func() { } }) - It("is unable to read NnfNode resource via spoke v1alpha1", func() { - // Spoke should have annotation. - resSpoke := &nnfv1alpha1.NnfNode{} - Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).ToNot(Succeed()) - }) - It("reads NnfNode resource via hub and via spoke v1alpha2", func() { // Spoke should have annotation. resSpoke := &nnfv1alpha2.NnfNode{} @@ -594,12 +551,6 @@ var _ = Describe("Conversion Webhook Test", func() { } }) - It("is unable to read NnfNodeBlockStorage resource via spoke v1alpha1", func() { - // Spoke should have annotation. - resSpoke := &nnfv1alpha1.NnfNodeBlockStorage{} - Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).ToNot(Succeed()) - }) - It("reads NnfNodeBlockStorage resource via hub and via spoke v1alpha2", func() { // Spoke should have annotation. resSpoke := &nnfv1alpha2.NnfNodeBlockStorage{} @@ -665,12 +616,6 @@ var _ = Describe("Conversion Webhook Test", func() { } }) - It("is unable to read NnfNodeECData resource via spoke v1alpha1", func() { - // Spoke should have annotation. - resSpoke := &nnfv1alpha1.NnfNodeECData{} - Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).ToNot(Succeed()) - }) - It("reads NnfNodeECData resource via hub and via spoke v1alpha2", func() { // Spoke should have annotation. 
resSpoke := &nnfv1alpha2.NnfNodeECData{} @@ -736,12 +681,6 @@ var _ = Describe("Conversion Webhook Test", func() { } }) - It("is unable to read NnfNodeStorage resource via spoke v1alpha1", func() { - // Spoke should have annotation. - resSpoke := &nnfv1alpha1.NnfNodeStorage{} - Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).ToNot(Succeed()) - }) - It("reads NnfNodeStorage resource via hub and via spoke v1alpha2", func() { // Spoke should have annotation. resSpoke := &nnfv1alpha2.NnfNodeStorage{} @@ -809,12 +748,6 @@ var _ = Describe("Conversion Webhook Test", func() { } }) - It("is unable to read NnfPortManager resource via spoke v1alpha1", func() { - // Spoke should have annotation. - resSpoke := &nnfv1alpha1.NnfPortManager{} - Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).ToNot(Succeed()) - }) - It("reads NnfPortManager resource via hub and via spoke v1alpha2", func() { // Spoke should have annotation. resSpoke := &nnfv1alpha2.NnfPortManager{} @@ -882,12 +815,6 @@ var _ = Describe("Conversion Webhook Test", func() { } }) - It("is unable to read NnfStorage resource via spoke v1alpha1", func() { - // Spoke should have annotation. - resSpoke := &nnfv1alpha1.NnfStorage{} - Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).ToNot(Succeed()) - }) - It("reads NnfStorage resource via hub and via spoke v1alpha2", func() { // Spoke should have annotation. resSpoke := &nnfv1alpha2.NnfStorage{} @@ -953,12 +880,6 @@ var _ = Describe("Conversion Webhook Test", func() { } }) - It("is unable to read NnfStorageProfile resource via spoke v1alpha1", func() { - // Spoke should have annotation. - resSpoke := &nnfv1alpha1.NnfStorageProfile{} - Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).ToNot(Succeed()) - }) - It("reads NnfStorageProfile resource via hub and via spoke v1alpha2", func() { // Spoke should have annotation. 
resSpoke := &nnfv1alpha2.NnfStorageProfile{} @@ -1024,12 +945,6 @@ var _ = Describe("Conversion Webhook Test", func() { } }) - It("is unable to read NnfSystemStorage resource via spoke v1alpha1", func() { - // Spoke should have annotation. - resSpoke := &nnfv1alpha1.NnfSystemStorage{} - Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(resHub), resSpoke)).ToNot(Succeed()) - }) - It("reads NnfSystemStorage resource via hub and via spoke v1alpha2", func() { // Spoke should have annotation. resSpoke := &nnfv1alpha2.NnfSystemStorage{} diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 5fe01a19..874cb09e 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -50,7 +50,6 @@ import ( lusv1beta1 "github.com/NearNodeFlash/lustre-fs-operator/api/v1beta1" nnf "github.com/NearNodeFlash/nnf-ec/pkg" - nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" nnfv1alpha2 "github.com/NearNodeFlash/nnf-sos/api/v1alpha2" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" @@ -143,9 +142,6 @@ var _ = BeforeSuite(func() { err = lusv1beta1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) - err = nnfv1alpha1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - err = nnfv1alpha2.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) From 5fbad9049c50a57427da3eb5e4bb8eb5e9afed90 Mon Sep 17 00:00:00 2001 From: Blake Devcich <89158881+bdevcich@users.noreply.github.com> Date: Wed, 20 Nov 2024 16:21:54 -0600 Subject: [PATCH 13/23] Fix aggressive lvRemove and vgRemove (#418) The default storage profile was only using the $VG_NAME, causing lvRemove to remove all logical volumes in the volume group. Additionally, volume groups were being removed without first checking to see if logical volumes exist in the volume group. 
While both of these issues have no real effect on the removal of the LVs/VGs, this was causing issues with the new PreUnmount commands. The first logical volume to run the lvRemove command was the only filesystem to run the PreUnmount commands, since it destroys all the other filesystems on the rabbit in that single volume group. With this change, the PreUnmount command runs on each filesystem before it is destroyed. Signed-off-by: Blake Devcich --- config/examples/nnf_nnfstorageprofile.yaml | 6 +++--- pkg/blockdevice/lvm.go | 8 +++++++- pkg/blockdevice/lvm/volume_groups.go | 18 +++++++++++++++++- 3 files changed, 27 insertions(+), 5 deletions(-) diff --git a/config/examples/nnf_nnfstorageprofile.yaml b/config/examples/nnf_nnfstorageprofile.yaml index 73b0a676..d8c39dec 100644 --- a/config/examples/nnf_nnfstorageprofile.yaml +++ b/config/examples/nnf_nnfstorageprofile.yaml @@ -50,7 +50,7 @@ data: lvChange: activate: --activate ys $VG_NAME/$LV_NAME deactivate: --activate n $VG_NAME/$LV_NAME - lvRemove: $VG_NAME + lvRemove: $VG_NAME/$LV_NAME mkfs: -j2 -p $PROTOCOL -t $CLUSTER_NAME:$LOCK_SPACE $DEVICE mountRabbit: $DEVICE $MOUNT_PATH mountCompute: $DEVICE $MOUNT_PATH @@ -70,7 +70,7 @@ data: lvChange: activate: --activate y $VG_NAME/$LV_NAME deactivate: --activate n $VG_NAME/$LV_NAME - lvRemove: $VG_NAME + lvRemove: $VG_NAME/$LV_NAME mkfs: $DEVICE mountRabbit: $DEVICE $MOUNT_PATH mountCompute: $DEVICE $MOUNT_PATH @@ -90,4 +90,4 @@ data: lvChange: activate: --activate y $VG_NAME/$LV_NAME deactivate: --activate n $VG_NAME/$LV_NAME - lvRemove: $VG_NAME + lvRemove: $VG_NAME/$LV_NAME diff --git a/pkg/blockdevice/lvm.go b/pkg/blockdevice/lvm.go index f02f48e8..52a2e9d5 100644 --- a/pkg/blockdevice/lvm.go +++ b/pkg/blockdevice/lvm.go @@ -140,12 +140,18 @@ func (l *Lvm) Destroy(ctx context.Context) (bool, error) { destroyed, err := l.LogicalVolume.Remove(ctx, l.CommandArgs.LvArgs.Remove) if err != nil { return false, err - } if destroyed { objectDestroyed = true } + // Check
to ensure the VG has no LVs before removing + if count, err := l.VolumeGroup.NumLVs(ctx); err != nil { + return false, err + } else if count != 0 { + return objectDestroyed, nil + } + destroyed, err = l.VolumeGroup.Remove(ctx, l.CommandArgs.VgArgs.Remove) if err != nil { return false, err diff --git a/pkg/blockdevice/lvm/volume_groups.go b/pkg/blockdevice/lvm/volume_groups.go index acff13d2..334d4fd8 100644 --- a/pkg/blockdevice/lvm/volume_groups.go +++ b/pkg/blockdevice/lvm/volume_groups.go @@ -174,7 +174,7 @@ func (vg *VolumeGroup) LockStop(ctx context.Context, rawArgs string) (bool, erro return false, err } - if exists == false { + if !exists { return false, nil } @@ -211,3 +211,19 @@ func (vg *VolumeGroup) Remove(ctx context.Context, rawArgs string) (bool, error) return false, nil } + +func (vg *VolumeGroup) NumLVs(ctx context.Context) (int, error) { + count := 0 + + lvs, err := lvsListVolumes(ctx, vg.Log) + if err != nil { + return count, err + } + for _, lv := range lvs { + if lv.VGName == vg.Name { + count += 1 + } + } + + return count, nil +} From 26913a7b31a3811cc2c1ea0110fb7f1855c91dde Mon Sep 17 00:00:00 2001 From: Dean Roehrich Date: Fri, 22 Nov 2024 10:56:58 -0600 Subject: [PATCH 14/23] Enable mock filesystems in KIND (#420) On the docker host, the /tmp/nnf dir is expected to be mounted into each docker container as /mnt/nnf. This should be specified as an extraMounts in the KIND config file. The k8s pods will add volume mounts for /mnt/nnf. When using KIND, new mock devices will be represented as directories in /mnt/nnf. New filesystems will be represented as directories in /mnt/nnf, containing per-rabbit symlinks back to the mock "device" directory. For a user container to use these mock filesystems, their pod must have a volume mount for the mock filesystem and for the mock device directory, where the symlink is pointing. 
Signed-off-by: Dean Roehrich --- config/kind/manager_volumes_patch.yaml | 9 ++-- internal/controller/filesystem_helpers.go | 39 +++++++++++++-- ...f_workflow_controller_container_helpers.go | 10 ++-- .../nnf_workflow_controller_helpers.go | 47 +++++++++++++++++++ pkg/filesystem/kind.go | 22 +++++---- 5 files changed, 107 insertions(+), 20 deletions(-) diff --git a/config/kind/manager_volumes_patch.yaml b/config/kind/manager_volumes_patch.yaml index a6bf6487..3da27b6c 100644 --- a/config/kind/manager_volumes_patch.yaml +++ b/config/kind/manager_volumes_patch.yaml @@ -10,15 +10,16 @@ spec: - name: manager workingDir: /localdisk volumeMounts: - - mountPath: /mnt - name: mnt-dir + - mountPath: /mnt/nnf + name: mock-filesystems mountPropagation: Bidirectional - mountPath: /localdisk name: localdisk volumes: - - name: mnt-dir + - name: mock-filesystems hostPath: - path: /mnt + path: /mnt/nnf + type: DirectoryOrCreate - name: localdisk hostPath: type: DirectoryOrCreate diff --git a/internal/controller/filesystem_helpers.go b/internal/controller/filesystem_helpers.go index cb176cb3..4e34caa2 100644 --- a/internal/controller/filesystem_helpers.go +++ b/internal/controller/filesystem_helpers.go @@ -42,6 +42,21 @@ import ( //+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfnodestorages/finalizers,verbs=update //+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfstorageprofiles,verbs=get;create;list;watch;update;patch;delete;deletecollection +func getBlockDeviceAndFileSystemForMock(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha4.NnfNodeStorage, index int, log logr.Logger) (blockdevice.BlockDevice, filesystem.FileSystem, error) { + + blockDevice, err := newMockBlockDevice(ctx, c, nnfNodeStorage, index, log) + if err != nil { + return nil, nil, dwsv1alpha2.NewResourceError("could not create mock block device").WithError(err).WithMajor() + } + + fileSystem, err := newMockFileSystem(nnfNodeStorage, index, log) + if err != nil { + return nil, nil, 
dwsv1alpha2.NewResourceError("could not create mock file system").WithError(err).WithMajor() + } + + return blockDevice, fileSystem, nil +} + func getBlockDeviceAndFileSystemForKind(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha4.NnfNodeStorage, index int, log logr.Logger) (blockdevice.BlockDevice, filesystem.FileSystem, error) { blockDevice, err := newMockBlockDevice(ctx, c, nnfNodeStorage, index, log) @@ -49,7 +64,7 @@ func getBlockDeviceAndFileSystemForKind(ctx context.Context, c client.Client, nn return nil, nil, dwsv1alpha2.NewResourceError("could not create mock block device").WithError(err).WithMajor() } - fileSystem, err := newMockFileSystem(ctx, c, nnfNodeStorage, blockDevice, index, log) + fileSystem, err := newKindFileSystem(nnfNodeStorage, index, log) if err != nil { return nil, nil, dwsv1alpha2.NewResourceError("could not create mock file system").WithError(err).WithMajor() } @@ -59,8 +74,11 @@ func getBlockDeviceAndFileSystemForKind(ctx context.Context, c client.Client, nn // getBlockDeviceAndFileSystem returns blockdevice and filesystem interfaces based on the allocation type and NnfStorageProfile. 
func getBlockDeviceAndFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha4.NnfNodeStorage, index int, log logr.Logger) (blockdevice.BlockDevice, filesystem.FileSystem, error) { - _, found := os.LookupEnv("NNF_TEST_ENVIRONMENT") - if found || os.Getenv("ENVIRONMENT") == "kind" { + if _, found := os.LookupEnv("NNF_TEST_ENVIRONMENT"); found { + return getBlockDeviceAndFileSystemForMock(ctx, c, nnfNodeStorage, index, log) + + } + if os.Getenv("ENVIRONMENT") == "kind" { return getBlockDeviceAndFileSystemForKind(ctx, c, nnfNodeStorage, index, log) } @@ -444,7 +462,7 @@ func newLustreFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *n return &fs, nil } -func newMockFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha4.NnfNodeStorage, blockDevice blockdevice.BlockDevice, index int, log logr.Logger) (filesystem.FileSystem, error) { +func newMockFileSystem(nnfNodeStorage *nnfv1alpha4.NnfNodeStorage, index int, log logr.Logger) (filesystem.FileSystem, error) { path := os.Getenv("MOCK_FILE_SYSTEM_PATH") if len(path) == 0 { path = "/mnt/filesystems" @@ -454,8 +472,21 @@ func newMockFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnf Log: log, Path: fmt.Sprintf("/%s/%s-%d", path, nnfNodeStorage.GetName(), index), } + return &fs, nil +} +func newKindFileSystem(nnfNodeStorage *nnfv1alpha4.NnfNodeStorage, index int, log logr.Logger) (filesystem.FileSystem, error) { + path := os.Getenv("MOCK_FILE_SYSTEM_PATH") + if len(path) == 0 { + path = "/mnt/nnf" + } + + fs := filesystem.KindFileSystem{ + Log: log, + Path: fmt.Sprintf("/%s/%s-%d", path, nnfNodeStorage.GetName(), index), + } return &fs, nil + } func lustreTargetPath(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha4.NnfNodeStorage, targetType string, index int) (string, error) { diff --git a/internal/controller/nnf_workflow_controller_container_helpers.go b/internal/controller/nnf_workflow_controller_container_helpers.go index 
a0579dcb..a8f2533a 100644 --- a/internal/controller/nnf_workflow_controller_container_helpers.go +++ b/internal/controller/nnf_workflow_controller_container_helpers.go @@ -538,10 +538,12 @@ func (c *nnfUserContainer) addNnfVolumes(spec *corev1.PodSpec) { MountPath: vol.mountPath, }) - container.Env = append(container.Env, corev1.EnvVar{ - Name: vol.envVarName, - Value: vol.mountPath, - }) + if vol.envVarName != "" { + container.Env = append(container.Env, corev1.EnvVar{ + Name: vol.envVarName, + Value: vol.mountPath, + }) + } } } } diff --git a/internal/controller/nnf_workflow_controller_helpers.go b/internal/controller/nnf_workflow_controller_helpers.go index de045059..1ec5eb04 100644 --- a/internal/controller/nnf_workflow_controller_helpers.go +++ b/internal/controller/nnf_workflow_controller_helpers.go @@ -1905,6 +1905,17 @@ func (r *NnfWorkflowReconciler) getContainerJobs(ctx context.Context, workflow * return jobList, nil } +func (r *NnfWorkflowReconciler) getNnfNodeStorages(ctx context.Context, workflow *dwsv1alpha2.Workflow) (*nnfv1alpha4.NnfNodeStorageList, error) { + matchLabels := dwsv1alpha2.MatchingWorkflow(workflow) + + nodeStorages := &nnfv1alpha4.NnfNodeStorageList{} + if err := r.List(ctx, nodeStorages, matchLabels); err != nil { + return nil, dwsv1alpha2.NewResourceError("could not retrieve NnfNodeStorages").WithError(err).WithMajor() + } + + return nodeStorages, nil +} + // Create a list of volumes to be mounted inside of the containers based on the DW_JOB/DW_PERSISTENT arguments func (r *NnfWorkflowReconciler) getContainerVolumes(ctx context.Context, workflow *dwsv1alpha2.Workflow, dwArgs map[string]string, profile *nnfv1alpha4.NnfContainerProfile) ([]nnfContainerVolume, *result, error) { volumes := []nnfContainerVolume{} @@ -1991,9 +2002,45 @@ func (r *NnfWorkflowReconciler) getContainerVolumes(ctx context.Context, workflo volumes = append(volumes, vol) } + if os.Getenv("ENVIRONMENT") == "kind" { + devVolumes, err := 
r.findMockDevicesForKind(ctx, workflow) + if err != nil { + return nil, nil, err + } + volumes = append(volumes, devVolumes...) + } return volumes, nil, nil } +// If we're using the KIND mock storage then we also have to create a volume +// mount for the path that represents the device beneath the filesystem. +func (r *NnfWorkflowReconciler) findMockDevicesForKind(ctx context.Context, workflow *dwsv1alpha2.Workflow) ([]nnfContainerVolume, error) { + volumes := []nnfContainerVolume{} + + nodeStoragesList, err := r.getNnfNodeStorages(ctx, workflow) + if err != nil { + return nil, dwsv1alpha2.NewResourceError("could not find devices for KIND environment").WithError(err) + } + // On GFS2, the same device is visible on multiple rabbits. Track dupes + // and add a mount for only one of them. + devNames := make(map[string]struct{}) + devCount := 0 + for _, nodeStorage := range nodeStoragesList.Items { + if _, found := devNames[nodeStorage.GetName()]; !found { + for idx := 0; idx < nodeStorage.Spec.Count; idx++ { + vol := nnfContainerVolume{ + name: fmt.Sprintf("kind-device-%d", devCount), + mountPath: fmt.Sprintf("/mnt/nnf/%s-%d", nodeStorage.GetName(), idx), + } + volumes = append(volumes, vol) + devCount += 1 + } + devNames[nodeStorage.GetName()] = struct{}{} + } + } + return volumes, nil +} + // Use the container profile to determine how many ports are needed and request them from the default NnfPortManager func (r *NnfWorkflowReconciler) getContainerPorts(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int) (*result, error) { profile, err := getContainerProfile(ctx, r.Client, workflow, index) diff --git a/pkg/filesystem/kind.go b/pkg/filesystem/kind.go index 7aae05ba..aa799fb4 100644 --- a/pkg/filesystem/kind.go +++ b/pkg/filesystem/kind.go @@ -23,6 +23,7 @@ import ( "context" "fmt" "os" + "path/filepath" "github.com/NearNodeFlash/nnf-sos/pkg/blockdevice" "github.com/go-logr/logr" @@ -47,7 +48,7 @@ func (m *KindFileSystem) Create(ctx context.Context, 
complete bool) (bool, error return false, fmt.Errorf("could not create mount directory %s: %w", m.Path, err) } - m.Log.Info("Created mock file system", "path", m.Path) + m.Log.Info("Created mock file system in kind", "path", m.Path) return true, nil } @@ -55,7 +56,7 @@ func (m *KindFileSystem) Destroy(ctx context.Context) (bool, error) { // Remove the directory. If it fails don't worry about it. _ = os.RemoveAll(m.Path) - m.Log.Info("Destroyed mock file system") + m.Log.Info("Destroyed mock file system in kind") return true, nil } @@ -64,12 +65,12 @@ func (m *KindFileSystem) Activate(ctx context.Context, complete bool) (bool, err return false, nil } - m.Log.Info("Activated mock file system") + m.Log.Info("Activated mock file system in kind") return true, nil } func (m *KindFileSystem) Deactivate(ctx context.Context) (bool, error) { - m.Log.Info("Deactivated mock file system") + m.Log.Info("Deactivated mock file system in kind") return true, nil } @@ -78,11 +79,16 @@ func (m *KindFileSystem) Mount(ctx context.Context, path string, complete bool) return false, nil } + bn := filepath.Dir(path) + if err := os.MkdirAll(bn, 0755); err != nil { + return false, fmt.Errorf("could not create directory for symlink %s: %w", bn, err) + } + if err := os.Symlink(m.Path, path); err != nil { return false, fmt.Errorf("could not create symlink mount %s: %w", path, err) } - m.Log.Info("Mounted mock file system", "filesystem", m.Path, "mount", path) + m.Log.Info("Mounted mock file system in kind", "filesystem", m.Path, "mount", path) return true, nil } @@ -90,7 +96,7 @@ func (m *KindFileSystem) Unmount(ctx context.Context, path string) (bool, error) // Remove the directory. If it fails don't worry about it. 
_ = os.Remove(path) - m.Log.Info("Unmounted mock file system") + m.Log.Info("Unmounted mock file system in kind") return true, nil } @@ -99,13 +105,13 @@ func (m *KindFileSystem) PostActivate(ctx context.Context, complete bool) (bool, return false, nil } - m.Log.Info("Ran PostActivate") + m.Log.Info("Ran PostActivate in kind") return true, nil } func (m *KindFileSystem) PreDeactivate(ctx context.Context) (bool, error) { - m.Log.Info("Ran PreDeactivate") + m.Log.Info("Ran PreDeactivate in kind") return true, nil } From c07ab8f13689f118bb6c0b8f1211841757207531 Mon Sep 17 00:00:00 2001 From: matthew-richerson <82597529+matthew-richerson@users.noreply.github.com> Date: Mon, 25 Nov 2024 16:01:36 -0600 Subject: [PATCH 15/23] Add IgnoreOfflineComputes option to NnfAccess and NnfSystemStorage (#421) This option is used to allow the NnfSystemStorage to succeed even when there are computes that are offline. As computes come back online, they will be given access to the storage. Signed-off-by: Matt Richerson --- api/v1alpha2/conversion.go | 15 ++++++- api/v1alpha2/zz_generated.conversion.go | 41 +++++++++++++------ api/v1alpha3/conversion.go | 15 ++++++- api/v1alpha3/zz_generated.conversion.go | 41 +++++++++++++------ api/v1alpha4/nnfaccess_types.go | 3 ++ api/v1alpha4/nnfsystemstorage_types.go | 3 ++ .../bases/nnf.cray.hpe.com_nnfaccesses.yaml | 4 ++ .../nnf.cray.hpe.com_nnfsystemstorages.yaml | 4 ++ internal/controller/nnf_access_controller.go | 28 +++++++++++++ .../controller/nnfsystemstorage_controller.go | 1 + 10 files changed, 129 insertions(+), 26 deletions(-) diff --git a/api/v1alpha2/conversion.go b/api/v1alpha2/conversion.go index 3825f60b..e3f81b29 100644 --- a/api/v1alpha2/conversion.go +++ b/api/v1alpha2/conversion.go @@ -42,13 +42,20 @@ func (src *NnfAccess) ConvertTo(dstRaw conversion.Hub) error { // Manually restore data. 
restored := &nnfv1alpha4.NnfAccess{} - if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + hasAnno, err := utilconversion.UnmarshalData(src, restored) + if err != nil { return err } // EDIT THIS FUNCTION! If the annotation is holding anything that is // hub-specific then copy it into 'dst' from 'restored'. // Otherwise, you may comment out UnmarshalData() until it's needed. + if hasAnno { + dst.Spec.IgnoreOfflineComputes = restored.Spec.IgnoreOfflineComputes + } else { + dst.Spec.IgnoreOfflineComputes = false + } + return nil } @@ -494,8 +501,10 @@ func (src *NnfSystemStorage) ConvertTo(dstRaw conversion.Hub) error { if hasAnno { dst.Spec.Shared = restored.Spec.Shared + dst.Spec.IgnoreOfflineComputes = restored.Spec.IgnoreOfflineComputes } else { dst.Spec.Shared = true + dst.Spec.IgnoreOfflineComputes = false } return nil @@ -648,3 +657,7 @@ func Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha2_NnfStorageProf func Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec(in *nnfv1alpha4.NnfSystemStorageSpec, out *NnfSystemStorageSpec, s apiconversion.Scope) error { return autoConvert_v1alpha4_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec(in, out, s) } + +func Convert_v1alpha4_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(in *nnfv1alpha4.NnfAccessSpec, out *NnfAccessSpec, s apiconversion.Scope) error { + return autoConvert_v1alpha4_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(in, out, s) +} diff --git a/api/v1alpha2/zz_generated.conversion.go b/api/v1alpha2/zz_generated.conversion.go index c90daa08..d6cb21a2 100644 --- a/api/v1alpha2/zz_generated.conversion.go +++ b/api/v1alpha2/zz_generated.conversion.go @@ -78,11 +78,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfAccessSpec)(nil), (*NnfAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha4_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(a.(*v1alpha4.NnfAccessSpec), b.(*NnfAccessSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*NnfAccessStatus)(nil), (*v1alpha4.NnfAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha2_NnfAccessStatus_To_v1alpha4_NnfAccessStatus(a.(*NnfAccessStatus), b.(*v1alpha4.NnfAccessStatus), scope) }); err != nil { @@ -858,6 +853,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1alpha4.NnfAccessSpec)(nil), (*NnfAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(a.(*v1alpha4.NnfAccessSpec), b.(*NnfAccessSpec), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1alpha4.NnfStorageProfileCmdLines)(nil), (*NnfStorageProfileCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfileCmdLines(a.(*v1alpha4.NnfStorageProfileCmdLines), b.(*NnfStorageProfileCmdLines), scope) }); err != nil { @@ -938,7 +938,17 @@ func Convert_v1alpha4_NnfAccess_To_v1alpha2_NnfAccess(in *v1alpha4.NnfAccess, ou func autoConvert_v1alpha2_NnfAccessList_To_v1alpha4_NnfAccessList(in *NnfAccessList, out *v1alpha4.NnfAccessList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha4.NnfAccess)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1alpha4.NnfAccess, len(*in)) + for i := range *in { + if err := Convert_v1alpha2_NnfAccess_To_v1alpha4_NnfAccess(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -949,7 +959,17 @@ func Convert_v1alpha2_NnfAccessList_To_v1alpha4_NnfAccessList(in *NnfAccessList, func 
autoConvert_v1alpha4_NnfAccessList_To_v1alpha2_NnfAccessList(in *v1alpha4.NnfAccessList, out *NnfAccessList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]NnfAccess)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfAccess, len(*in)) + for i := range *in { + if err := Convert_v1alpha4_NnfAccess_To_v1alpha2_NnfAccess(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -988,14 +1008,10 @@ func autoConvert_v1alpha4_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(in *v1alpha4.N out.MakeClientMounts = in.MakeClientMounts out.MountPathPrefix = in.MountPathPrefix out.StorageReference = in.StorageReference + // WARNING: in.IgnoreOfflineComputes requires manual conversion: does not exist in peer-type return nil } -// Convert_v1alpha4_NnfAccessSpec_To_v1alpha2_NnfAccessSpec is an autogenerated conversion function. -func Convert_v1alpha4_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(in *v1alpha4.NnfAccessSpec, out *NnfAccessSpec, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(in, out, s) -} - func autoConvert_v1alpha2_NnfAccessStatus_To_v1alpha4_NnfAccessStatus(in *NnfAccessStatus, out *v1alpha4.NnfAccessStatus, s conversion.Scope) error { out.State = in.State out.Ready = in.Ready @@ -3203,6 +3219,7 @@ func autoConvert_v1alpha4_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec( out.Type = in.Type // WARNING: in.Shared requires manual conversion: does not exist in peer-type out.StorageProfile = in.StorageProfile + // WARNING: in.IgnoreOfflineComputes requires manual conversion: does not exist in peer-type out.MakeClientMounts = in.MakeClientMounts out.ClientMountPath = in.ClientMountPath return nil diff --git a/api/v1alpha3/conversion.go b/api/v1alpha3/conversion.go index 32a98544..3e3ea021 100644 --- a/api/v1alpha3/conversion.go +++ b/api/v1alpha3/conversion.go @@ -42,13 +42,20 @@ func (src 
*NnfAccess) ConvertTo(dstRaw conversion.Hub) error { // Manually restore data. restored := &nnfv1alpha4.NnfAccess{} - if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + hasAnno, err := utilconversion.UnmarshalData(src, restored) + if err != nil { return err } // EDIT THIS FUNCTION! If the annotation is holding anything that is // hub-specific then copy it into 'dst' from 'restored'. // Otherwise, you may comment out UnmarshalData() until it's needed. + if hasAnno { + dst.Spec.IgnoreOfflineComputes = restored.Spec.IgnoreOfflineComputes + } else { + dst.Spec.IgnoreOfflineComputes = false + } + return nil } @@ -501,8 +508,10 @@ func (src *NnfSystemStorage) ConvertTo(dstRaw conversion.Hub) error { if hasAnno { dst.Spec.Shared = restored.Spec.Shared + dst.Spec.IgnoreOfflineComputes = restored.Spec.IgnoreOfflineComputes } else { dst.Spec.Shared = true + dst.Spec.IgnoreOfflineComputes = false } return nil @@ -662,3 +671,7 @@ func Convert_v1alpha4_NnfStorageProfileLustreCmdLines_To_v1alpha3_NnfStorageProf func Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec(in *nnfv1alpha4.NnfSystemStorageSpec, out *NnfSystemStorageSpec, s apiconversion.Scope) error { return autoConvert_v1alpha4_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec(in, out, s) } + +func Convert_v1alpha4_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(in *nnfv1alpha4.NnfAccessSpec, out *NnfAccessSpec, s apiconversion.Scope) error { + return autoConvert_v1alpha4_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(in, out, s) +} diff --git a/api/v1alpha3/zz_generated.conversion.go b/api/v1alpha3/zz_generated.conversion.go index 3470fa2c..c19ca524 100644 --- a/api/v1alpha3/zz_generated.conversion.go +++ b/api/v1alpha3/zz_generated.conversion.go @@ -78,11 +78,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfAccessSpec)(nil), (*NnfAccessSpec)(nil), func(a, b interface{}, scope 
conversion.Scope) error { - return Convert_v1alpha4_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(a.(*v1alpha4.NnfAccessSpec), b.(*NnfAccessSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*NnfAccessStatus)(nil), (*v1alpha4.NnfAccessStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_NnfAccessStatus_To_v1alpha4_NnfAccessStatus(a.(*NnfAccessStatus), b.(*v1alpha4.NnfAccessStatus), scope) }); err != nil { @@ -858,6 +853,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1alpha4.NnfAccessSpec)(nil), (*NnfAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(a.(*v1alpha4.NnfAccessSpec), b.(*NnfAccessSpec), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1alpha4.NnfStorageProfileCmdLines)(nil), (*NnfStorageProfileCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(a.(*v1alpha4.NnfStorageProfileCmdLines), b.(*NnfStorageProfileCmdLines), scope) }); err != nil { @@ -938,7 +938,17 @@ func Convert_v1alpha4_NnfAccess_To_v1alpha3_NnfAccess(in *v1alpha4.NnfAccess, ou func autoConvert_v1alpha3_NnfAccessList_To_v1alpha4_NnfAccessList(in *NnfAccessList, out *v1alpha4.NnfAccessList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha4.NnfAccess)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1alpha4.NnfAccess, len(*in)) + for i := range *in { + if err := Convert_v1alpha3_NnfAccess_To_v1alpha4_NnfAccess(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -949,7 +959,17 @@ func Convert_v1alpha3_NnfAccessList_To_v1alpha4_NnfAccessList(in *NnfAccessList, func 
autoConvert_v1alpha4_NnfAccessList_To_v1alpha3_NnfAccessList(in *v1alpha4.NnfAccessList, out *NnfAccessList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]NnfAccess)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfAccess, len(*in)) + for i := range *in { + if err := Convert_v1alpha4_NnfAccess_To_v1alpha3_NnfAccess(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -988,14 +1008,10 @@ func autoConvert_v1alpha4_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(in *v1alpha4.N out.MakeClientMounts = in.MakeClientMounts out.MountPathPrefix = in.MountPathPrefix out.StorageReference = in.StorageReference + // WARNING: in.IgnoreOfflineComputes requires manual conversion: does not exist in peer-type return nil } -// Convert_v1alpha4_NnfAccessSpec_To_v1alpha3_NnfAccessSpec is an autogenerated conversion function. -func Convert_v1alpha4_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(in *v1alpha4.NnfAccessSpec, out *NnfAccessSpec, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(in, out, s) -} - func autoConvert_v1alpha3_NnfAccessStatus_To_v1alpha4_NnfAccessStatus(in *NnfAccessStatus, out *v1alpha4.NnfAccessStatus, s conversion.Scope) error { out.State = in.State out.Ready = in.Ready @@ -3202,6 +3218,7 @@ func autoConvert_v1alpha4_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec( out.Type = in.Type // WARNING: in.Shared requires manual conversion: does not exist in peer-type out.StorageProfile = in.StorageProfile + // WARNING: in.IgnoreOfflineComputes requires manual conversion: does not exist in peer-type out.MakeClientMounts = in.MakeClientMounts out.ClientMountPath = in.ClientMountPath return nil diff --git a/api/v1alpha4/nnfaccess_types.go b/api/v1alpha4/nnfaccess_types.go index 2b33e617..55956f93 100644 --- a/api/v1alpha4/nnfaccess_types.go +++ b/api/v1alpha4/nnfaccess_types.go @@ -71,6 +71,9 @@ 
type NnfAccessSpec struct { // StorageReference is the NnfStorage reference StorageReference corev1.ObjectReference `json:"storageReference"` + + // +kubebuilder:default=false + IgnoreOfflineComputes bool `json:"ignoreOfflineComputes"` } // NnfAccessStatus defines the observed state of NnfAccess diff --git a/api/v1alpha4/nnfsystemstorage_types.go b/api/v1alpha4/nnfsystemstorage_types.go index 2e29de8b..62abe589 100644 --- a/api/v1alpha4/nnfsystemstorage_types.go +++ b/api/v1alpha4/nnfsystemstorage_types.go @@ -91,6 +91,9 @@ type NnfSystemStorageSpec struct { // StorageProfile is an object reference to the storage profile to use StorageProfile corev1.ObjectReference `json:"storageProfile"` + // +kubebuilder:default:=false + IgnoreOfflineComputes bool `json:"ignoreOfflineComputes"` + // MakeClientMounts specifies whether to make ClientMount resources or just // make the devices available to the client // +kubebuilder:default:=false diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfaccesses.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfaccesses.yaml index c9c82df9..9c0a2ad6 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfaccesses.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfaccesses.yaml @@ -615,6 +615,9 @@ spec: description: GroupID for the new mount. 
Currently only used for raw format: int32 type: integer + ignoreOfflineComputes: + default: false + type: boolean makeClientMounts: default: true description: |- @@ -706,6 +709,7 @@ spec: required: - desiredState - groupID + - ignoreOfflineComputes - makeClientMounts - storageReference - target diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfsystemstorages.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfsystemstorages.yaml index fec341f2..9fb38926 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfsystemstorages.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfsystemstorages.yaml @@ -565,6 +565,9 @@ spec: items: type: string type: array + ignoreOfflineComputes: + default: false + type: boolean includeComputes: description: |- IncludeComputes is a list of computes nodes to use rather than getting the list of compute nodes @@ -692,6 +695,7 @@ spec: type: string required: - capacity + - ignoreOfflineComputes - makeClientMounts - shared - storageProfile diff --git a/internal/controller/nnf_access_controller.go b/internal/controller/nnf_access_controller.go index ab206a52..d5fdf698 100644 --- a/internal/controller/nnf_access_controller.go +++ b/internal/controller/nnf_access_controller.go @@ -870,6 +870,34 @@ func (r *NnfAccessReconciler) getBlockStorageAccessStatus(ctx context.Context, a for allocationIndex, allocation := range nnfNodeBlockStorage.Spec.Allocations { for _, nodeName := range allocation.Access { blockAccess, exists := nnfNodeBlockStorage.Status.Allocations[allocationIndex].Accesses[nodeName] + if access.Spec.IgnoreOfflineComputes { + storage := &dwsv1alpha2.Storage{ + ObjectMeta: metav1.ObjectMeta{ + Name: nnfNodeBlockStorage.GetNamespace(), + Namespace: corev1.NamespaceDefault, + }, + } + + if err := r.Get(ctx, client.ObjectKeyFromObject(storage), storage); err != nil { + return false, err + } + + computeOffline := false + for _, compute := range storage.Status.Access.Computes { + if compute.Name != nodeName { + continue + } + + if compute.Status == 
dwsv1alpha2.OfflineStatus { + computeOffline = true + } + } + + // If the compute is offline, don't check its status + if computeOffline { + continue + } + } // if the map entry doesn't exist in the status section for this node yet, then keep waiting if !exists { diff --git a/internal/controller/nnfsystemstorage_controller.go b/internal/controller/nnfsystemstorage_controller.go index 30d95f4c..72b66a23 100644 --- a/internal/controller/nnfsystemstorage_controller.go +++ b/internal/controller/nnfsystemstorage_controller.go @@ -610,6 +610,7 @@ func (r *NnfSystemStorageReconciler) createNnfAccess(ctx context.Context, nnfSys } else { nnfAccess.Spec.Target = "single" } + nnfAccess.Spec.IgnoreOfflineComputes = nnfSystemStorage.Spec.IgnoreOfflineComputes nnfAccess.Spec.MakeClientMounts = nnfSystemStorage.Spec.MakeClientMounts nnfAccess.Spec.MountPath = nnfSystemStorage.Spec.ClientMountPath nnfAccess.Spec.ClientReference = corev1.ObjectReference{ From 02711232a9ab63df341040c949005d90e1358665 Mon Sep 17 00:00:00 2001 From: Dean Roehrich Date: Tue, 26 Nov 2024 09:02:52 -0600 Subject: [PATCH 16/23] Add mkdirCommand to NnfDataMovementProfile (#422) Signed-off-by: Dean Roehrich --- api/v1alpha2/conversion.go | 12 +++++- api/v1alpha2/zz_generated.conversion.go | 40 +++++++++++++------ api/v1alpha3/conversion.go | 11 ++++- api/v1alpha3/zz_generated.conversion.go | 40 +++++++++++++------ api/v1alpha4/nnfdatamovementprofile_types.go | 14 ++++++- ....cray.hpe.com_nnfdatamovementprofiles.yaml | 17 +++++++- 6 files changed, 106 insertions(+), 28 deletions(-) diff --git a/api/v1alpha2/conversion.go b/api/v1alpha2/conversion.go index e3f81b29..8806dd0d 100644 --- a/api/v1alpha2/conversion.go +++ b/api/v1alpha2/conversion.go @@ -177,13 +177,19 @@ func (src *NnfDataMovementProfile) ConvertTo(dstRaw conversion.Hub) error { // Manually restore data. 
restored := &nnfv1alpha4.NnfDataMovementProfile{} - if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + hasAnno, err := utilconversion.UnmarshalData(src, restored) + if err != nil { return err } + // EDIT THIS FUNCTION! If the annotation is holding anything that is // hub-specific then copy it into 'dst' from 'restored'. // Otherwise, you may comment out UnmarshalData() until it's needed. + if hasAnno { + dst.Data.MkdirCommand = restored.Data.MkdirCommand + } + return nil } @@ -661,3 +667,7 @@ func Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec(in * func Convert_v1alpha4_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(in *nnfv1alpha4.NnfAccessSpec, out *NnfAccessSpec, s apiconversion.Scope) error { return autoConvert_v1alpha4_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(in, out, s) } + +func Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData(in *nnfv1alpha4.NnfDataMovementProfileData, out *NnfDataMovementProfileData, s apiconversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData(in, out, s) +} diff --git a/api/v1alpha2/zz_generated.conversion.go b/api/v1alpha2/zz_generated.conversion.go index d6cb21a2..3dd14989 100644 --- a/api/v1alpha2/zz_generated.conversion.go +++ b/api/v1alpha2/zz_generated.conversion.go @@ -223,11 +223,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementProfileData)(nil), (*NnfDataMovementProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData(a.(*v1alpha4.NnfDataMovementProfileData), b.(*NnfDataMovementProfileData), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*NnfDataMovementProfileList)(nil), (*v1alpha4.NnfDataMovementProfileList)(nil), func(a, b interface{}, 
scope conversion.Scope) error { return Convert_v1alpha2_NnfDataMovementProfileList_To_v1alpha4_NnfDataMovementProfileList(a.(*NnfDataMovementProfileList), b.(*v1alpha4.NnfDataMovementProfileList), scope) }); err != nil { @@ -858,6 +853,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1alpha4.NnfDataMovementProfileData)(nil), (*NnfDataMovementProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData(a.(*v1alpha4.NnfDataMovementProfileData), b.(*NnfDataMovementProfileData), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1alpha4.NnfStorageProfileCmdLines)(nil), (*NnfStorageProfileCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfileCmdLines(a.(*v1alpha4.NnfStorageProfileCmdLines), b.(*NnfStorageProfileCmdLines), scope) }); err != nil { @@ -1432,17 +1432,23 @@ func autoConvert_v1alpha4_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovement out.ProgressIntervalSeconds = in.ProgressIntervalSeconds out.CreateDestDir = in.CreateDestDir out.StatCommand = in.StatCommand + // WARNING: in.MkdirCommand requires manual conversion: does not exist in peer-type return nil } -// Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData(in *v1alpha4.NnfDataMovementProfileData, out *NnfDataMovementProfileData, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData(in, out, s) -} - func autoConvert_v1alpha2_NnfDataMovementProfileList_To_v1alpha4_NnfDataMovementProfileList(in *NnfDataMovementProfileList, out *v1alpha4.NnfDataMovementProfileList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha4.NnfDataMovementProfile)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1alpha4.NnfDataMovementProfile, len(*in)) + for i := range *in { + if err := Convert_v1alpha2_NnfDataMovementProfile_To_v1alpha4_NnfDataMovementProfile(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -1453,7 +1459,17 @@ func Convert_v1alpha2_NnfDataMovementProfileList_To_v1alpha4_NnfDataMovementProf func autoConvert_v1alpha4_NnfDataMovementProfileList_To_v1alpha2_NnfDataMovementProfileList(in *v1alpha4.NnfDataMovementProfileList, out *NnfDataMovementProfileList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]NnfDataMovementProfile)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfDataMovementProfile, len(*in)) + for i := range *in { + if err := Convert_v1alpha4_NnfDataMovementProfile_To_v1alpha2_NnfDataMovementProfile(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } diff --git a/api/v1alpha3/conversion.go b/api/v1alpha3/conversion.go index 3e3ea021..14e5819c 100644 --- a/api/v1alpha3/conversion.go +++ b/api/v1alpha3/conversion.go @@ -177,13 +177,18 @@ func (src *NnfDataMovementProfile) ConvertTo(dstRaw conversion.Hub) error { // Manually restore data. 
restored := &nnfv1alpha4.NnfDataMovementProfile{} - if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + hasAnno, err := utilconversion.UnmarshalData(src, restored) + if err != nil { return err } // EDIT THIS FUNCTION! If the annotation is holding anything that is // hub-specific then copy it into 'dst' from 'restored'. // Otherwise, you may comment out UnmarshalData() until it's needed. + if hasAnno { + dst.Data.MkdirCommand = restored.Data.MkdirCommand + } + return nil } @@ -675,3 +680,7 @@ func Convert_v1alpha4_NnfSystemStorageSpec_To_v1alpha3_NnfSystemStorageSpec(in * func Convert_v1alpha4_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(in *nnfv1alpha4.NnfAccessSpec, out *NnfAccessSpec, s apiconversion.Scope) error { return autoConvert_v1alpha4_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(in, out, s) } + +func Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData(in *nnfv1alpha4.NnfDataMovementProfileData, out *NnfDataMovementProfileData, s apiconversion.Scope) error { + return autoConvert_v1alpha4_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData(in, out, s) +} diff --git a/api/v1alpha3/zz_generated.conversion.go b/api/v1alpha3/zz_generated.conversion.go index c19ca524..cd3af0a6 100644 --- a/api/v1alpha3/zz_generated.conversion.go +++ b/api/v1alpha3/zz_generated.conversion.go @@ -223,11 +223,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfDataMovementProfileData)(nil), (*NnfDataMovementProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData(a.(*v1alpha4.NnfDataMovementProfileData), b.(*NnfDataMovementProfileData), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*NnfDataMovementProfileList)(nil), (*v1alpha4.NnfDataMovementProfileList)(nil), func(a, b interface{}, 
scope conversion.Scope) error { return Convert_v1alpha3_NnfDataMovementProfileList_To_v1alpha4_NnfDataMovementProfileList(a.(*NnfDataMovementProfileList), b.(*v1alpha4.NnfDataMovementProfileList), scope) }); err != nil { @@ -858,6 +853,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1alpha4.NnfDataMovementProfileData)(nil), (*NnfDataMovementProfileData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData(a.(*v1alpha4.NnfDataMovementProfileData), b.(*NnfDataMovementProfileData), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1alpha4.NnfStorageProfileCmdLines)(nil), (*NnfStorageProfileCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(a.(*v1alpha4.NnfStorageProfileCmdLines), b.(*NnfStorageProfileCmdLines), scope) }); err != nil { @@ -1432,17 +1432,23 @@ func autoConvert_v1alpha4_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovement out.ProgressIntervalSeconds = in.ProgressIntervalSeconds out.CreateDestDir = in.CreateDestDir out.StatCommand = in.StatCommand + // WARNING: in.MkdirCommand requires manual conversion: does not exist in peer-type return nil } -// Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData(in *v1alpha4.NnfDataMovementProfileData, out *NnfDataMovementProfileData, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData(in, out, s) -} - func autoConvert_v1alpha3_NnfDataMovementProfileList_To_v1alpha4_NnfDataMovementProfileList(in *NnfDataMovementProfileList, out *v1alpha4.NnfDataMovementProfileList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha4.NnfDataMovementProfile)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1alpha4.NnfDataMovementProfile, len(*in)) + for i := range *in { + if err := Convert_v1alpha3_NnfDataMovementProfile_To_v1alpha4_NnfDataMovementProfile(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -1453,7 +1459,17 @@ func Convert_v1alpha3_NnfDataMovementProfileList_To_v1alpha4_NnfDataMovementProf func autoConvert_v1alpha4_NnfDataMovementProfileList_To_v1alpha3_NnfDataMovementProfileList(in *v1alpha4.NnfDataMovementProfileList, out *NnfDataMovementProfileList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]NnfDataMovementProfile)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfDataMovementProfile, len(*in)) + for i := range *in { + if err := Convert_v1alpha4_NnfDataMovementProfile_To_v1alpha3_NnfDataMovementProfile(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } diff --git a/api/v1alpha4/nnfdatamovementprofile_types.go b/api/v1alpha4/nnfdatamovementprofile_types.go index 59fb2054..a187c0b6 100644 --- a/api/v1alpha4/nnfdatamovementprofile_types.go +++ b/api/v1alpha4/nnfdatamovementprofile_types.go @@ -84,7 +84,7 @@ type NnfDataMovementProfileData struct { CreateDestDir bool `json:"createDestDir"` 
// If CreateDestDir is true, then use StatCommand to perform the stat commands. - // Use setpriv to stat the path with the specified UID/GID. + // Use setpriv to execute with the specified UID/GID. // Available $VARS: // HOSTFILE: hostfile that is created and used for mpirun. Contains a list of hosts and the // slots/max_slots for each host. This hostfile is created at @@ -94,6 +94,18 @@ type NnfDataMovementProfileData struct { // PATH: Path to stat // +kubebuilder:default:="mpirun --allow-run-as-root -np 1 --hostfile $HOSTFILE -- setpriv --euid $UID --egid $GID --clear-groups stat --cached never -c '%F' $PATH" StatCommand string `json:"statCommand"` + + // If CreateDestDir is true, then use MkdirCommand to perform the mkdir commands. + // Use setpriv to execute with the specified UID/GID. + // Available $VARS: + // HOSTFILE: hostfile that is created and used for mpirun. Contains a list of hosts and the + // slots/max_slots for each host. This hostfile is created at + // `/tmp//hostfile`. This is the same hostfile used as the one for Command. + // UID: User ID that is inherited from the Workflow + // GID: Group ID that is inherited from the Workflow + // PATH: Path to mkdir + // +kubebuilder:default:="mpirun --allow-run-as-root -np 1 --hostfile $HOSTFILE -- setpriv --euid $UID --egid $GID --clear-groups mkdir -p $PATH" + MkdirCommand string `json:"mkdirCommand"` } // +kubebuilder:object:root=true diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementprofiles.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementprofiles.yaml index 7e12c44e..639d2934 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementprofiles.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfdatamovementprofiles.yaml @@ -330,6 +330,20 @@ spec: use of max_slots in the hostfile. The hostfile is used for both `statCommand` and `Command`. 
minimum: 0 type: integer + mkdirCommand: + default: mpirun --allow-run-as-root -np 1 --hostfile $HOSTFILE -- + setpriv --euid $UID --egid $GID --clear-groups mkdir -p $PATH + description: |- + If CreateDestDir is true, then use MkdirCommand to perform the mkdir commands. + Use setpriv to execute with the specified UID/GID. + Available $VARS: + HOSTFILE: hostfile that is created and used for mpirun. Contains a list of hosts and the + slots/max_slots for each host. This hostfile is created at + `/tmp//hostfile`. This is the same hostfile used as the one for Command. + UID: User ID that is inherited from the Workflow + GID: Group ID that is inherited from the Workflow + PATH: Path to mkdir + type: string pinned: default: false description: Pinned is true if this instance is an immutable copy @@ -356,7 +370,7 @@ spec: -c '%F' $PATH description: |- If CreateDestDir is true, then use StatCommand to perform the stat commands. - Use setpriv to stat the path with the specified UID/GID. + Use setpriv to execute with the specified UID/GID. Available $VARS: HOSTFILE: hostfile that is created and used for mpirun. Contains a list of hosts and the slots/max_slots for each host. 
This hostfile is created at @@ -375,6 +389,7 @@ spec: - command - createDestDir - maxSlots + - mkdirCommand - slots - statCommand type: object From 595b723d900a9da2f6c7ae533e75613608c4627b Mon Sep 17 00:00:00 2001 From: Dean Roehrich Date: Tue, 26 Nov 2024 09:06:36 -0600 Subject: [PATCH 17/23] Propagate ENVIRONMENT and NNF_NODE_NAME to user containers (#423) Signed-off-by: Dean Roehrich --- .../nnf_workflow_controller_container_helpers.go | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/internal/controller/nnf_workflow_controller_container_helpers.go b/internal/controller/nnf_workflow_controller_container_helpers.go index a8f2533a..c8977228 100644 --- a/internal/controller/nnf_workflow_controller_container_helpers.go +++ b/internal/controller/nnf_workflow_controller_container_helpers.go @@ -22,6 +22,7 @@ package controller import ( "context" "fmt" + "os" "strconv" "strings" @@ -566,7 +567,7 @@ func (c *nnfUserContainer) addEnvVars(spec *corev1.PodSpec, mpi bool) { subdomain = worker hosts = append(hosts, launcher) - for i, _ := range c.nnfNodes { + for i := range c.nnfNodes { hosts = append(hosts, fmt.Sprintf("%s-%d", worker, i)) } } else { @@ -577,6 +578,16 @@ func (c *nnfUserContainer) addEnvVars(spec *corev1.PodSpec, mpi bool) { container.Env = append(container.Env, corev1.EnvVar{Name: "NNF_CONTAINER_SUBDOMAIN", Value: subdomain}, corev1.EnvVar{Name: "NNF_CONTAINER_DOMAIN", Value: domain}, - corev1.EnvVar{Name: "NNF_CONTAINER_HOSTNAMES", Value: strings.Join(hosts, " ")}) + corev1.EnvVar{Name: "NNF_CONTAINER_HOSTNAMES", Value: strings.Join(hosts, " ")}, + corev1.EnvVar{Name: "ENVIRONMENT", Value: os.Getenv("ENVIRONMENT")}, + corev1.EnvVar{ + Name: "NNF_NODE_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "spec.nodeName", + }, + }, + }) } } From 123b12e709b526984f8626d2e75d0a703e02da30 Mon Sep 17 00:00:00 2001 From: matthew-richerson 
<82597529+matthew-richerson@users.noreply.github.com> Date: Thu, 5 Dec 2024 13:45:22 -0600 Subject: [PATCH 18/23] Requeue in NnfAccess after NnfNodeBlockStorage conflict (#425) The NnfNodeBlockStorage resource is not owned by the NnfAccess, so the NnfAccess won't be requeued after the client cache updates. Signed-off-by: Matt Richerson --- internal/controller/nnf_access_controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/controller/nnf_access_controller.go b/internal/controller/nnf_access_controller.go index d5fdf698..ea75f168 100644 --- a/internal/controller/nnf_access_controller.go +++ b/internal/controller/nnf_access_controller.go @@ -217,7 +217,7 @@ func (r *NnfAccessReconciler) mount(ctx context.Context, access *nnfv1alpha4.Nnf err = r.addBlockStorageAccess(ctx, access, storageMapping) if err != nil { if apierrors.IsConflict(err) { - return &ctrl.Result{}, nil + return &ctrl.Result{RequeueAfter: time.Second * 2}, nil } return nil, dwsv1alpha2.NewResourceError("unable to add endpoints to NnfNodeStorage").WithError(err) From 4c04a02be9798542ba67a210a3d1d6f8fd7f43f4 Mon Sep 17 00:00:00 2001 From: Blake Devcich Date: Fri, 6 Dec 2024 16:10:45 -0600 Subject: [PATCH 19/23] Fix NnfAccess ClientMount count This was a workaround to account for creating multiple OSTs in an unorthodox manner in the Servers resource. Flux won't create multiple allocation sets on a single rabbit, but rather use the count when there are multiple allocations on a single rabbit. This workaround causes a bug and is not needed. 
Signed-off-by: Blake Devcich --- internal/controller/nnf_access_controller.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/internal/controller/nnf_access_controller.go b/internal/controller/nnf_access_controller.go index ea75f168..3d489d86 100644 --- a/internal/controller/nnf_access_controller.go +++ b/internal/controller/nnf_access_controller.go @@ -1121,14 +1121,12 @@ func (r *NnfAccessReconciler) getClientMountStatus(ctx context.Context, access * } // Check whether the clientmounts have finished mounting/unmounting - count := 0 for _, clientMount := range clientMounts { if len(clientMount.Status.Mounts) != len(clientMount.Spec.Mounts) { return false, nil } for _, mount := range clientMount.Status.Mounts { - count++ if string(mount.State) != access.Status.State { return false, nil } @@ -1139,9 +1137,9 @@ func (r *NnfAccessReconciler) getClientMountStatus(ctx context.Context, access * } } - if count != len(clientList) { + if len(clientMounts) != len(clientList) { if access.GetDeletionTimestamp().IsZero() { - log.Info("unexpected number of ClientMounts", "found", count, "expected", len(clientList)) + log.Info("unexpected number of ClientMounts", "found", len(clientMounts), "expected", len(clientList)) } return false, nil } From fdac84d9495fc93317a4299b51305731bd2213aa Mon Sep 17 00:00:00 2001 From: Blake Devcich Date: Wed, 4 Dec 2024 13:25:45 -0600 Subject: [PATCH 20/23] PostMount Env Variables (e.g. NUM_OSTS) There is no way to get the number of OSTs, etc when using `PostMount` commands. For instance, when setting the striping. This change adds the following environment variables for use in the `NnfStorageProfiles` when using `*CmdLines`: - NUM_MDTS - NUM_MGTS - NUM_MGTMDTS - NUM_OSTS - NUM_NNFNODES To support this, a list of nodes for each component type has been added to the status of `NnfStorage`, which in turn is copied to the `NnfNodeStorage` resource's spec. 
This info can then be turned into the environment variables for use when running the commands. The `.nnf-servers.json` file created to store this information on the compute node has been updated to this new structure. An example with 8 OSTs and 2 MGTMDTs on 1 rabbit: ```json { "mdt": [], "mgt": [], "mgtmdt": [ "rabbit-node-1", "rabbit-node-1" ], "nnfNode": [ "rabbit-node-1" ], "ost": [ "rabbit-node-1", "rabbit-node-1", "rabbit-node-1", "rabbit-node-1", "rabbit-node-1", "rabbit-node-1", "rabbit-node-1", "rabbit-node-1" ] } ``` Signed-off-by: Blake Devcich --- api/v1alpha2/conversion.go | 29 ++++++- api/v1alpha2/zz_generated.conversion.go | 80 +++++++++++++------ api/v1alpha3/conversion.go | 31 ++++++- api/v1alpha3/zz_generated.conversion.go | 80 +++++++++++++------ api/v1alpha4/nnfnodestorage_types.go | 5 ++ api/v1alpha4/nnfstorage_types.go | 25 ++++++ api/v1alpha4/zz_generated.deepcopy.go | 48 ++++++++++- .../nnf.cray.hpe.com_nnfnodestorages.yaml | 35 ++++++++ .../bases/nnf.cray.hpe.com_nnfstorages.yaml | 34 ++++++++ internal/controller/filesystem_helpers.go | 21 ++--- .../controller/nnf_clientmount_controller.go | 64 ++++++++------- .../nnf_clientmount_controller_test.go | 62 ++++++++------ internal/controller/nnf_storage_controller.go | 49 ++++++++++++ .../controller/nnf_storage_controller_test.go | 77 ++++++++++++++++++ 14 files changed, 522 insertions(+), 118 deletions(-) create mode 100644 internal/controller/nnf_storage_controller_test.go diff --git a/api/v1alpha2/conversion.go b/api/v1alpha2/conversion.go index 8806dd0d..1049d3bf 100644 --- a/api/v1alpha2/conversion.go +++ b/api/v1alpha2/conversion.go @@ -343,12 +343,20 @@ func (src *NnfNodeStorage) ConvertTo(dstRaw conversion.Hub) error { // Manually restore data. restored := &nnfv1alpha4.NnfNodeStorage{} - if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + hasAnno, err := utilconversion.UnmarshalData(src, restored) + if err != nil { return err } // EDIT THIS FUNCTION! 
If the annotation is holding anything that is // hub-specific then copy it into 'dst' from 'restored'. // Otherwise, you may comment out UnmarshalData() until it's needed. + if hasAnno { + dst.Spec.LustreStorage.LustreComponents.MDTs = append([]string(nil), restored.Spec.LustreStorage.LustreComponents.MDTs...) + dst.Spec.LustreStorage.LustreComponents.MGTs = append([]string(nil), restored.Spec.LustreStorage.LustreComponents.MGTs...) + dst.Spec.LustreStorage.LustreComponents.MGTMDTs = append([]string(nil), restored.Spec.LustreStorage.LustreComponents.MGTMDTs...) + dst.Spec.LustreStorage.LustreComponents.OSTs = append([]string(nil), restored.Spec.LustreStorage.LustreComponents.OSTs...) + dst.Spec.LustreStorage.LustreComponents.NNFNodes = append([]string(nil), restored.Spec.LustreStorage.LustreComponents.NNFNodes...) + } return nil } @@ -407,12 +415,21 @@ func (src *NnfStorage) ConvertTo(dstRaw conversion.Hub) error { // Manually restore data. restored := &nnfv1alpha4.NnfStorage{} - if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + hasAnno, err := utilconversion.UnmarshalData(src, restored) + if err != nil { return err } + // EDIT THIS FUNCTION! If the annotation is holding anything that is // hub-specific then copy it into 'dst' from 'restored'. // Otherwise, you may comment out UnmarshalData() until it's needed. + if hasAnno { + dst.Status.LustreComponents.MDTs = append([]string(nil), restored.Status.LustreComponents.MDTs...) + dst.Status.LustreComponents.MGTs = append([]string(nil), restored.Status.LustreComponents.MGTs...) + dst.Status.LustreComponents.MGTMDTs = append([]string(nil), restored.Status.LustreComponents.MGTMDTs...) + dst.Status.LustreComponents.OSTs = append([]string(nil), restored.Status.LustreComponents.OSTs...) + dst.Status.LustreComponents.NNFNodes = append([]string(nil), restored.Status.LustreComponents.NNFNodes...) 
+ } return nil } @@ -671,3 +688,11 @@ func Convert_v1alpha4_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(in *nnfv1alpha4.Nn func Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData(in *nnfv1alpha4.NnfDataMovementProfileData, out *NnfDataMovementProfileData, s apiconversion.Scope) error { return autoConvert_v1alpha4_NnfDataMovementProfileData_To_v1alpha2_NnfDataMovementProfileData(in, out, s) } + +func Convert_v1alpha4_LustreStorageSpec_To_v1alpha2_LustreStorageSpec(in *nnfv1alpha4.LustreStorageSpec, out *LustreStorageSpec, s apiconversion.Scope) error { + return autoConvert_v1alpha4_LustreStorageSpec_To_v1alpha2_LustreStorageSpec(in, out, s) +} + +func Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha2_NnfStorageLustreStatus(in *nnfv1alpha4.NnfStorageLustreStatus, out *NnfStorageLustreStatus, s apiconversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageLustreStatus_To_v1alpha2_NnfStorageLustreStatus(in, out, s) +} diff --git a/api/v1alpha2/zz_generated.conversion.go b/api/v1alpha2/zz_generated.conversion.go index 3dd14989..bd69100d 100644 --- a/api/v1alpha2/zz_generated.conversion.go +++ b/api/v1alpha2/zz_generated.conversion.go @@ -48,11 +48,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha4.LustreStorageSpec)(nil), (*LustreStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_LustreStorageSpec_To_v1alpha2_LustreStorageSpec(a.(*v1alpha4.LustreStorageSpec), b.(*LustreStorageSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*NnfAccess)(nil), (*v1alpha4.NnfAccess)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha2_NnfAccess_To_v1alpha4_NnfAccess(a.(*NnfAccess), b.(*v1alpha4.NnfAccess), scope) }); err != nil { @@ -678,11 +673,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageLustreStatus)(nil), (*NnfStorageLustreStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha2_NnfStorageLustreStatus(a.(*v1alpha4.NnfStorageLustreStatus), b.(*NnfStorageLustreStatus), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*NnfStorageProfile)(nil), (*v1alpha4.NnfStorageProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha2_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(a.(*NnfStorageProfile), b.(*v1alpha4.NnfStorageProfile), scope) }); err != nil { @@ -848,6 +838,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1alpha4.LustreStorageSpec)(nil), (*LustreStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_LustreStorageSpec_To_v1alpha2_LustreStorageSpec(a.(*v1alpha4.LustreStorageSpec), b.(*LustreStorageSpec), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1alpha4.NnfAccessSpec)(nil), (*NnfAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha4_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(a.(*v1alpha4.NnfAccessSpec), b.(*NnfAccessSpec), scope) }); err != nil { @@ -858,6 +853,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1alpha4.NnfStorageLustreStatus)(nil), (*NnfStorageLustreStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha2_NnfStorageLustreStatus(a.(*v1alpha4.NnfStorageLustreStatus), b.(*NnfStorageLustreStatus), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1alpha4.NnfStorageProfileCmdLines)(nil), (*NnfStorageProfileCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { return 
Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha2_NnfStorageProfileCmdLines(a.(*v1alpha4.NnfStorageProfileCmdLines), b.(*NnfStorageProfileCmdLines), scope) }); err != nil { @@ -896,14 +896,10 @@ func autoConvert_v1alpha4_LustreStorageSpec_To_v1alpha2_LustreStorageSpec(in *v1 out.StartIndex = in.StartIndex out.MgsAddress = in.MgsAddress out.BackFs = in.BackFs + // WARNING: in.LustreComponents requires manual conversion: does not exist in peer-type return nil } -// Convert_v1alpha4_LustreStorageSpec_To_v1alpha2_LustreStorageSpec is an autogenerated conversion function. -func Convert_v1alpha4_LustreStorageSpec_To_v1alpha2_LustreStorageSpec(in *v1alpha4.LustreStorageSpec, out *LustreStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha4_LustreStorageSpec_To_v1alpha2_LustreStorageSpec(in, out, s) -} - func autoConvert_v1alpha2_NnfAccess_To_v1alpha4_NnfAccess(in *NnfAccess, out *v1alpha4.NnfAccess, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1alpha2_NnfAccessSpec_To_v1alpha4_NnfAccessSpec(&in.Spec, &out.Spec, s); err != nil { @@ -2188,7 +2184,17 @@ func Convert_v1alpha4_NnfNodeStorageAllocationStatus_To_v1alpha2_NnfNodeStorageA func autoConvert_v1alpha2_NnfNodeStorageList_To_v1alpha4_NnfNodeStorageList(in *NnfNodeStorageList, out *v1alpha4.NnfNodeStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha4.NnfNodeStorage)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1alpha4.NnfNodeStorage, len(*in)) + for i := range *in { + if err := Convert_v1alpha2_NnfNodeStorage_To_v1alpha4_NnfNodeStorage(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -2199,7 +2205,17 @@ func Convert_v1alpha2_NnfNodeStorageList_To_v1alpha4_NnfNodeStorageList(in *NnfN func autoConvert_v1alpha4_NnfNodeStorageList_To_v1alpha2_NnfNodeStorageList(in *v1alpha4.NnfNodeStorageList, out 
*NnfNodeStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]NnfNodeStorage)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfNodeStorage, len(*in)) + for i := range *in { + if err := Convert_v1alpha4_NnfNodeStorage_To_v1alpha2_NnfNodeStorage(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -2578,7 +2594,17 @@ func Convert_v1alpha4_NnfStorageAllocationSetStatus_To_v1alpha2_NnfStorageAlloca func autoConvert_v1alpha2_NnfStorageList_To_v1alpha4_NnfStorageList(in *NnfStorageList, out *v1alpha4.NnfStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha4.NnfStorage)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1alpha4.NnfStorage, len(*in)) + for i := range *in { + if err := Convert_v1alpha2_NnfStorage_To_v1alpha4_NnfStorage(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -2589,7 +2615,17 @@ func Convert_v1alpha2_NnfStorageList_To_v1alpha4_NnfStorageList(in *NnfStorageLi func autoConvert_v1alpha4_NnfStorageList_To_v1alpha2_NnfStorageList(in *v1alpha4.NnfStorageList, out *NnfStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]NnfStorage)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfStorage, len(*in)) + for i := range *in { + if err := Convert_v1alpha4_NnfStorage_To_v1alpha2_NnfStorage(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -2640,14 +2676,10 @@ func autoConvert_v1alpha4_NnfStorageLustreStatus_To_v1alpha2_NnfStorageLustreSta out.MgsAddress = in.MgsAddress out.FileSystemName = in.FileSystemName out.LustreMgtReference = in.LustreMgtReference + // WARNING: in.LustreComponents requires manual conversion: does not 
exist in peer-type return nil } -// Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha2_NnfStorageLustreStatus is an autogenerated conversion function. -func Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha2_NnfStorageLustreStatus(in *v1alpha4.NnfStorageLustreStatus, out *NnfStorageLustreStatus, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfStorageLustreStatus_To_v1alpha2_NnfStorageLustreStatus(in, out, s) -} - func autoConvert_v1alpha2_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(in *NnfStorageProfile, out *v1alpha4.NnfStorageProfile, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1alpha2_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData(&in.Data, &out.Data, s); err != nil { diff --git a/api/v1alpha3/conversion.go b/api/v1alpha3/conversion.go index 14e5819c..23682f1f 100644 --- a/api/v1alpha3/conversion.go +++ b/api/v1alpha3/conversion.go @@ -342,12 +342,20 @@ func (src *NnfNodeStorage) ConvertTo(dstRaw conversion.Hub) error { // Manually restore data. restored := &nnfv1alpha4.NnfNodeStorage{} - if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + hasAnno, err := utilconversion.UnmarshalData(src, restored) + if err != nil { return err } // EDIT THIS FUNCTION! If the annotation is holding anything that is // hub-specific then copy it into 'dst' from 'restored'. // Otherwise, you may comment out UnmarshalData() until it's needed. + if hasAnno { + dst.Spec.LustreStorage.LustreComponents.MDTs = append([]string(nil), restored.Spec.LustreStorage.LustreComponents.MDTs...) + dst.Spec.LustreStorage.LustreComponents.MGTs = append([]string(nil), restored.Spec.LustreStorage.LustreComponents.MGTs...) + dst.Spec.LustreStorage.LustreComponents.MGTMDTs = append([]string(nil), restored.Spec.LustreStorage.LustreComponents.MGTMDTs...) + dst.Spec.LustreStorage.LustreComponents.OSTs = append([]string(nil), restored.Spec.LustreStorage.LustreComponents.OSTs...) 
+ dst.Spec.LustreStorage.LustreComponents.NNFNodes = append([]string(nil), restored.Spec.LustreStorage.LustreComponents.NNFNodes...) + } return nil } @@ -406,12 +414,21 @@ func (src *NnfStorage) ConvertTo(dstRaw conversion.Hub) error { // Manually restore data. restored := &nnfv1alpha4.NnfStorage{} - if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + hasAnno, err := utilconversion.UnmarshalData(src, restored) + if err != nil { return err } + // EDIT THIS FUNCTION! If the annotation is holding anything that is // hub-specific then copy it into 'dst' from 'restored'. // Otherwise, you may comment out UnmarshalData() until it's needed. + if hasAnno { + dst.Status.LustreComponents.MDTs = append([]string(nil), restored.Status.LustreComponents.MDTs...) + dst.Status.LustreComponents.MGTs = append([]string(nil), restored.Status.LustreComponents.MGTs...) + dst.Status.LustreComponents.MGTMDTs = append([]string(nil), restored.Status.LustreComponents.MGTMDTs...) + dst.Status.LustreComponents.OSTs = append([]string(nil), restored.Status.LustreComponents.OSTs...) + dst.Status.LustreComponents.NNFNodes = append([]string(nil), restored.Status.LustreComponents.NNFNodes...) + } return nil } @@ -684,3 +701,13 @@ func Convert_v1alpha4_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(in *nnfv1alpha4.Nn func Convert_v1alpha4_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData(in *nnfv1alpha4.NnfDataMovementProfileData, out *NnfDataMovementProfileData, s apiconversion.Scope) error { return autoConvert_v1alpha4_NnfDataMovementProfileData_To_v1alpha3_NnfDataMovementProfileData(in, out, s) } + +// Convert_v1alpha4_LustreStorageSpec_To_v1alpha3_LustreStorageSpec is an autogenerated conversion function. 
+func Convert_v1alpha4_LustreStorageSpec_To_v1alpha3_LustreStorageSpec(in *nnfv1alpha4.LustreStorageSpec, out *LustreStorageSpec, s apiconversion.Scope) error { + return autoConvert_v1alpha4_LustreStorageSpec_To_v1alpha3_LustreStorageSpec(in, out, s) +} + +// Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreStatus is an autogenerated conversion function. +func Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreStatus(in *nnfv1alpha4.NnfStorageLustreStatus, out *NnfStorageLustreStatus, s apiconversion.Scope) error { + return autoConvert_v1alpha4_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreStatus(in, out, s) +} diff --git a/api/v1alpha3/zz_generated.conversion.go b/api/v1alpha3/zz_generated.conversion.go index cd3af0a6..98b09515 100644 --- a/api/v1alpha3/zz_generated.conversion.go +++ b/api/v1alpha3/zz_generated.conversion.go @@ -48,11 +48,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha4.LustreStorageSpec)(nil), (*LustreStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_LustreStorageSpec_To_v1alpha3_LustreStorageSpec(a.(*v1alpha4.LustreStorageSpec), b.(*LustreStorageSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*NnfAccess)(nil), (*v1alpha4.NnfAccess)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_NnfAccess_To_v1alpha4_NnfAccess(a.(*NnfAccess), b.(*v1alpha4.NnfAccess), scope) }); err != nil { @@ -678,11 +673,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha4.NnfStorageLustreStatus)(nil), (*NnfStorageLustreStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreStatus(a.(*v1alpha4.NnfStorageLustreStatus), b.(*NnfStorageLustreStatus), 
scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*NnfStorageProfile)(nil), (*v1alpha4.NnfStorageProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(a.(*NnfStorageProfile), b.(*v1alpha4.NnfStorageProfile), scope) }); err != nil { @@ -848,6 +838,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1alpha4.LustreStorageSpec)(nil), (*LustreStorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_LustreStorageSpec_To_v1alpha3_LustreStorageSpec(a.(*v1alpha4.LustreStorageSpec), b.(*LustreStorageSpec), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1alpha4.NnfAccessSpec)(nil), (*NnfAccessSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha4_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(a.(*v1alpha4.NnfAccessSpec), b.(*NnfAccessSpec), scope) }); err != nil { @@ -858,6 +853,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1alpha4.NnfStorageLustreStatus)(nil), (*NnfStorageLustreStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreStatus(a.(*v1alpha4.NnfStorageLustreStatus), b.(*NnfStorageLustreStatus), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1alpha4.NnfStorageProfileCmdLines)(nil), (*NnfStorageProfileCmdLines)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha4_NnfStorageProfileCmdLines_To_v1alpha3_NnfStorageProfileCmdLines(a.(*v1alpha4.NnfStorageProfileCmdLines), b.(*NnfStorageProfileCmdLines), scope) }); err != nil { @@ -896,14 +896,10 @@ func autoConvert_v1alpha4_LustreStorageSpec_To_v1alpha3_LustreStorageSpec(in *v1 out.StartIndex = in.StartIndex 
out.MgsAddress = in.MgsAddress out.BackFs = in.BackFs + // WARNING: in.LustreComponents requires manual conversion: does not exist in peer-type return nil } -// Convert_v1alpha4_LustreStorageSpec_To_v1alpha3_LustreStorageSpec is an autogenerated conversion function. -func Convert_v1alpha4_LustreStorageSpec_To_v1alpha3_LustreStorageSpec(in *v1alpha4.LustreStorageSpec, out *LustreStorageSpec, s conversion.Scope) error { - return autoConvert_v1alpha4_LustreStorageSpec_To_v1alpha3_LustreStorageSpec(in, out, s) -} - func autoConvert_v1alpha3_NnfAccess_To_v1alpha4_NnfAccess(in *NnfAccess, out *v1alpha4.NnfAccess, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1alpha3_NnfAccessSpec_To_v1alpha4_NnfAccessSpec(&in.Spec, &out.Spec, s); err != nil { @@ -2188,7 +2184,17 @@ func Convert_v1alpha4_NnfNodeStorageAllocationStatus_To_v1alpha3_NnfNodeStorageA func autoConvert_v1alpha3_NnfNodeStorageList_To_v1alpha4_NnfNodeStorageList(in *NnfNodeStorageList, out *v1alpha4.NnfNodeStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha4.NnfNodeStorage)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1alpha4.NnfNodeStorage, len(*in)) + for i := range *in { + if err := Convert_v1alpha3_NnfNodeStorage_To_v1alpha4_NnfNodeStorage(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -2199,7 +2205,17 @@ func Convert_v1alpha3_NnfNodeStorageList_To_v1alpha4_NnfNodeStorageList(in *NnfN func autoConvert_v1alpha4_NnfNodeStorageList_To_v1alpha3_NnfNodeStorageList(in *v1alpha4.NnfNodeStorageList, out *NnfNodeStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]NnfNodeStorage)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfNodeStorage, len(*in)) + for i := range *in { + if err := 
Convert_v1alpha4_NnfNodeStorage_To_v1alpha3_NnfNodeStorage(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -2578,7 +2594,17 @@ func Convert_v1alpha4_NnfStorageAllocationSetStatus_To_v1alpha3_NnfStorageAlloca func autoConvert_v1alpha3_NnfStorageList_To_v1alpha4_NnfStorageList(in *NnfStorageList, out *v1alpha4.NnfStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha4.NnfStorage)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1alpha4.NnfStorage, len(*in)) + for i := range *in { + if err := Convert_v1alpha3_NnfStorage_To_v1alpha4_NnfStorage(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -2589,7 +2615,17 @@ func Convert_v1alpha3_NnfStorageList_To_v1alpha4_NnfStorageList(in *NnfStorageLi func autoConvert_v1alpha4_NnfStorageList_To_v1alpha3_NnfStorageList(in *v1alpha4.NnfStorageList, out *NnfStorageList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]NnfStorage)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfStorage, len(*in)) + for i := range *in { + if err := Convert_v1alpha4_NnfStorage_To_v1alpha3_NnfStorage(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -2640,14 +2676,10 @@ func autoConvert_v1alpha4_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreSta out.MgsAddress = in.MgsAddress out.FileSystemName = in.FileSystemName out.LustreMgtReference = in.LustreMgtReference + // WARNING: in.LustreComponents requires manual conversion: does not exist in peer-type return nil } -// Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreStatus is an autogenerated conversion function. 
-func Convert_v1alpha4_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreStatus(in *v1alpha4.NnfStorageLustreStatus, out *NnfStorageLustreStatus, s conversion.Scope) error { - return autoConvert_v1alpha4_NnfStorageLustreStatus_To_v1alpha3_NnfStorageLustreStatus(in, out, s) -} - func autoConvert_v1alpha3_NnfStorageProfile_To_v1alpha4_NnfStorageProfile(in *NnfStorageProfile, out *v1alpha4.NnfStorageProfile, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1alpha3_NnfStorageProfileData_To_v1alpha4_NnfStorageProfileData(&in.Data, &out.Data, s); err != nil { diff --git a/api/v1alpha4/nnfnodestorage_types.go b/api/v1alpha4/nnfnodestorage_types.go index 6b6b014b..b31af19d 100644 --- a/api/v1alpha4/nnfnodestorage_types.go +++ b/api/v1alpha4/nnfnodestorage_types.go @@ -88,6 +88,11 @@ type LustreStorageSpec struct { // BackFs is the type of backing filesystem to use. // +kubebuilder:validation:Enum=ldiskfs;zfs BackFs string `json:"backFs,omitempty"` + + // LustreComponents defines the list of NNF Nodes that are used for the components (e.g. OSTs) + // in the lustre filesystem. This information is helpful when creating the lustre filesystem and + // using PostMount commands (e.g. to set the striping). + LustreComponents NnfStorageLustreComponents `json:"lustreComponents,omitempty"` } // NnfNodeStorageStatus defines the status for NnfNodeStorage diff --git a/api/v1alpha4/nnfstorage_types.go b/api/v1alpha4/nnfstorage_types.go index b9ab275c..95c96ccd 100644 --- a/api/v1alpha4/nnfstorage_types.go +++ b/api/v1alpha4/nnfstorage_types.go @@ -60,6 +60,27 @@ type NnfStorageLustreSpec struct { PersistentMgsReference corev1.ObjectReference `json:"persistentMgsReference,omitempty"` } +// NnfStorageLustreComponents identifies which NNF nodes are used for each lustre component used by +// the lustre filesystem. Each list can include an NNF node multiple times if that is how it is +// being used (except for NNFNodes). 
+type NnfStorageLustreComponents struct { + // MDTs is the list of NNF nodes being used as MDTs. + MDTs []string `json:"mdts,omitempty"` + + // MGTs is the list of NNF nodes being used as MGTs. + MGTs []string `json:"mgts,omitempty"` + + // MGTMDTs is the list of NNF nodes being used as combined MGTMDTs. + MGTMDTs []string `json:"mgtmdts,omitempty"` + + // OSTs is the list of NNF nodes being used as OSTs. + OSTs []string `json:"osts,omitempty"` + + // NNFNodes is the list of NNF nodes being used for this filesystem. This is a unique list of + // node names. + NNFNodes []string `json:"nnfNodes,omitempty"` +} + // NnfStorageAllocationSetSpec defines the details for an allocation set type NnfStorageAllocationSetSpec struct { // Name is a human readable label for this set of allocations (e.g., xfs) @@ -124,6 +145,10 @@ type NnfStorageLustreStatus struct { // LustgreMgtReference is an object reference to the NnfLustreMGT resource used // by the NnfStorage LustreMgtReference corev1.ObjectReference `json:"lustreMgtReference,omitempty"` + + // LustreComponents defines the list of NNF Nodes that are used for the components (e.g. OSTs) + // in the lustre filesystem. + LustreComponents NnfStorageLustreComponents `json:"lustreComponents,omitempty"` } // NnfStorageStatus defines the observed status of NNF Storage. diff --git a/api/v1alpha4/zz_generated.deepcopy.go b/api/v1alpha4/zz_generated.deepcopy.go index dbbf2b92..a581ab62 100644 --- a/api/v1alpha4/zz_generated.deepcopy.go +++ b/api/v1alpha4/zz_generated.deepcopy.go @@ -32,6 +32,7 @@ import ( // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LustreStorageSpec) DeepCopyInto(out *LustreStorageSpec) { *out = *in + in.LustreComponents.DeepCopyInto(&out.LustreComponents) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LustreStorageSpec. 
@@ -1188,7 +1189,7 @@ func (in *NnfNodeStorage) DeepCopyInto(out *NnfNodeStorage) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) } @@ -1260,7 +1261,7 @@ func (in *NnfNodeStorageList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfNodeStorageSpec) DeepCopyInto(out *NnfNodeStorageSpec) { *out = *in - out.LustreStorage = in.LustreStorage + in.LustreStorage.DeepCopyInto(&out.LustreStorage) out.BlockReference = in.BlockReference } @@ -1583,6 +1584,46 @@ func (in *NnfStorageList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfStorageLustreComponents) DeepCopyInto(out *NnfStorageLustreComponents) { + *out = *in + if in.MDTs != nil { + in, out := &in.MDTs, &out.MDTs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.MGTs != nil { + in, out := &in.MGTs, &out.MGTs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.MGTMDTs != nil { + in, out := &in.MGTMDTs, &out.MGTMDTs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OSTs != nil { + in, out := &in.OSTs, &out.OSTs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NNFNodes != nil { + in, out := &in.NNFNodes, &out.NNFNodes + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageLustreComponents. +func (in *NnfStorageLustreComponents) DeepCopy() *NnfStorageLustreComponents { + if in == nil { + return nil + } + out := new(NnfStorageLustreComponents) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NnfStorageLustreSpec) DeepCopyInto(out *NnfStorageLustreSpec) { *out = *in @@ -1603,6 +1644,7 @@ func (in *NnfStorageLustreSpec) DeepCopy() *NnfStorageLustreSpec { func (in *NnfStorageLustreStatus) DeepCopyInto(out *NnfStorageLustreStatus) { *out = *in out.LustreMgtReference = in.LustreMgtReference + in.LustreComponents.DeepCopyInto(&out.LustreComponents) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageLustreStatus. @@ -1915,7 +1957,7 @@ func (in *NnfStorageSpec) DeepCopy() *NnfStorageSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfStorageStatus) DeepCopyInto(out *NnfStorageStatus) { *out = *in - out.NnfStorageLustreStatus = in.NnfStorageLustreStatus + in.NnfStorageLustreStatus.DeepCopyInto(&out.NnfStorageLustreStatus) if in.AllocationSets != nil { in, out := &in.AllocationSets, &out.AllocationSets *out = make([]NnfStorageAllocationSetStatus, len(*in)) diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfnodestorages.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfnodestorages.yaml index 2b0ddcb1..8a7aad79 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfnodestorages.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfnodestorages.yaml @@ -576,6 +576,41 @@ spec: filesystem. maxLength: 8 type: string + lustreComponents: + description: |- + LustreComponents defines the list of NNF Nodes that are used for the components (e.g. OSTs) + in the lustre filesystem. This information is helpful when creating the lustre filesystem and + using PostMount commands (e.g. to set the striping). + properties: + mdts: + description: MDTs is the list of NNF nodes being used as MDTs. + items: + type: string + type: array + mgtmdts: + description: MGTMDTs is the list of NNF nodes being used as + combined MGTMDTs. + items: + type: string + type: array + mgts: + description: MGTs is the list of NNF nodes being used as MGTs. 
+ items: + type: string + type: array + nnfNodes: + description: |- + NNFNodes is the list of NNF nodes being used for this filesystem. This is a unique list of + node names. + items: + type: string + type: array + osts: + description: OSTs is the list of NNF nodes being used as OSTs. + items: + type: string + type: array + type: object mgsAddress: description: |- MgsAddress is the NID of the MGS to use. This is used only when diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfstorages.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfstorages.yaml index 3bad19ad..0c3abe1b 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfstorages.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfstorages.yaml @@ -841,6 +841,40 @@ spec: filesystem. maxLength: 8 type: string + lustreComponents: + description: |- + LustreComponents defines the list of NNF Nodes that are used for the components (e.g. OSTs) + in the lustre filesystem. + properties: + mdts: + description: MDTs is the list of NNF nodes being used as MDTs. + items: + type: string + type: array + mgtmdts: + description: MGTMDTs is the list of NNF nodes being used as combined + MGTMDTs. + items: + type: string + type: array + mgts: + description: MGTs is the list of NNF nodes being used as MGTs. + items: + type: string + type: array + nnfNodes: + description: |- + NNFNodes is the list of NNF nodes being used for this filesystem. This is a unique list of + node names. + items: + type: string + type: array + osts: + description: OSTs is the list of NNF nodes being used as OSTs. 
+ items: + type: string + type: array + type: object lustreMgtReference: description: |- LustgreMgtReference is an object reference to the NnfLustreMGT resource used diff --git a/internal/controller/filesystem_helpers.go b/internal/controller/filesystem_helpers.go index 4e34caa2..169a40ce 100644 --- a/internal/controller/filesystem_helpers.go +++ b/internal/controller/filesystem_helpers.go @@ -125,21 +125,17 @@ func getBlockDeviceAndFileSystem(ctx context.Context, c client.Client, nnfNodeSt return blockDevice, fileSystem, nil case "lustre": - commandLines := nnfv1alpha4.NnfStorageProfileLustreCmdLines{} + var commandLines nnfv1alpha4.NnfStorageProfileLustreCmdLines switch nnfNodeStorage.Spec.LustreStorage.TargetType { case "mgt": commandLines = nnfStorageProfile.Data.LustreStorage.MgtCmdLines - break case "mgtmdt": commandLines = nnfStorageProfile.Data.LustreStorage.MgtMdtCmdLines - break case "mdt": commandLines = nnfStorageProfile.Data.LustreStorage.MdtCmdLines - break case "ost": commandLines = nnfStorageProfile.Data.LustreStorage.OstCmdLines - break default: return nil, nil, dwsv1alpha2.NewResourceError("invalid Lustre target type %s", nnfNodeStorage.Spec.LustreStorage.TargetType).WithFatal() } @@ -225,7 +221,7 @@ func newZpoolBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *n return nil, dwsv1alpha2.NewResourceError("could not get NnfNodeBlockStorage: %v", client.ObjectKeyFromObject(nnfNodeBlockStorage)).WithError(err).WithUserMessage("could not find storage allocation").WithMajor() } - if nnfNodeBlockStorage.Status.Ready == false { + if !nnfNodeBlockStorage.Status.Ready { return nil, dwsv1alpha2.NewResourceError("NnfNodeBlockStorage: %v not ready", client.ObjectKeyFromObject(nnfNodeBlockStorage)) } @@ -277,7 +273,7 @@ func newLvmBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *nnf return nil, dwsv1alpha2.NewResourceError("could not get NnfNodeBlockStorage: %v", 
client.ObjectKeyFromObject(nnfNodeBlockStorage)).WithError(err).WithUserMessage("could not find storage allocation").WithMajor() } - if nnfNodeBlockStorage.Status.Ready == false { + if !nnfNodeBlockStorage.Status.Ready { return nil, dwsv1alpha2.NewResourceError("NnfNodeBlockStorage: %v not ready", client.ObjectKeyFromObject(nnfNodeBlockStorage)) } @@ -454,9 +450,16 @@ func newLustreFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *n fs.CommandArgs.PreDeactivate = cmdLines.PreDeactivate fs.TempDir = fmt.Sprintf("/mnt/temp/%s-%d", nnfNodeStorage.Name, index) + components := nnfNodeStorage.Spec.LustreStorage.LustreComponents + fs.CommandArgs.Vars = map[string]string{ - "$USERID": fmt.Sprintf("%d", nnfNodeStorage.Spec.UserID), - "$GROUPID": fmt.Sprintf("%d", nnfNodeStorage.Spec.GroupID), + "$USERID": fmt.Sprintf("%d", nnfNodeStorage.Spec.UserID), + "$GROUPID": fmt.Sprintf("%d", nnfNodeStorage.Spec.GroupID), + "$NUM_MDTS": fmt.Sprintf("%d", len(components.MDTs)), + "$NUM_MGTS": fmt.Sprintf("%d", len(components.MGTs)), + "$NUM_MGTMDTS": fmt.Sprintf("%d", len(components.MGTMDTs)), + "$NUM_OSTS": fmt.Sprintf("%d", len(components.OSTs)), + "$NUM_NNFNODES": fmt.Sprintf("%d", len(components.NNFNodes)), } return &fs, nil diff --git a/internal/controller/nnf_clientmount_controller.go b/internal/controller/nnf_clientmount_controller.go index 36292d01..3132a4f9 100644 --- a/internal/controller/nnf_clientmount_controller.go +++ b/internal/controller/nnf_clientmount_controller.go @@ -318,7 +318,9 @@ func (r *NnfClientMountReconciler) dumpServersToFile(ctx context.Context, client defer file.Close() encoder := json.NewEncoder(file) - err = encoder.Encode(createLustreMapping(server)) + + components := getLustreMappingFromServer(server) + err = encoder.Encode(components) if err != nil { return dwsv1alpha2.NewResourceError("could not write JSON to file").WithError(err).WithMajor() } @@ -350,9 +352,9 @@ func (r *NnfClientMountReconciler) getServerForClientMount(ctx 
context.Context, ownerNS, ownerNSExists := clientMount.Labels[dwsv1alpha2.OwnerNamespaceLabel] _, idxExists := clientMount.Labels[nnfv1alpha4.DirectiveIndexLabel] - // We should expect the owner of the ClientMount to be NnfStorage and have the expected labels + // We should expect the owner to be NnfStorage and have the expected labels if !ownerExists || !ownerNameExists || !ownerNSExists || !idxExists || ownerKind != storageKind { - return nil, dwsv1alpha2.NewResourceError("expected ClientMount owner to be of kind NnfStorage and have the expected labels").WithMajor() + return nil, dwsv1alpha2.NewResourceError("expected owner to be of kind NnfStorage and have the expected labels").WithMajor() } // Retrieve the NnfStorage resource @@ -375,7 +377,7 @@ func (r *NnfClientMountReconciler) getServerForClientMount(ctx context.Context, // We should expect the owner of the NnfStorage to be Workflow or PersistentStorageInstance and // have the expected labels if !ownerExists || !ownerNameExists || !ownerNSExists || !idxExists || (ownerKind != workflowKind && ownerKind != persistentKind) { - return nil, dwsv1alpha2.NewResourceError("expected NnfStorage owner to be of kind Workflow or PersistentStorageInstance and have the expected labels").WithMajor() + return nil, dwsv1alpha2.NewResourceError("expected owner to be of kind Workflow or PersistentStorageInstance and have the expected labels").WithMajor() } // If the owner is a workflow, then we can use the workflow labels and directive index to get @@ -414,36 +416,40 @@ func (r *NnfClientMountReconciler) getServerForClientMount(ctx context.Context, return &serversList.Items[0], nil } -/* -Flatten the AllocationSets to create mapping for lustre information. 
Example: - - { - "ost": [ - "rabbit-node-1", - "rabbit-node=2" - ] - "mdt": [ - "rabbit-node-1", - "rabbit-node=2" - ] - } -*/ -func createLustreMapping(server *dwsv1alpha2.Servers) map[string][]string { - - m := map[string][]string{} - - for _, allocationSet := range server.Status.AllocationSets { +// Go through the Server's allocation sets to determine the number of Lustre components and rabbit +// nodes. Returns a map with keys for each lustre component type and also the nnf nodes involved. The +// list of nnf nodes is kept unique, but mdts, osts, etc can include a node multiple times. +func getLustreMappingFromServer(server *dwsv1alpha2.Servers) map[string][]string { + nnfNodeKey := "nnfNode" + components := map[string][]string{ + "mdt": []string{}, + "mgt": []string{}, + "mgtmdt": []string{}, + "ost": []string{}, + nnfNodeKey: []string{}, + } + rabbitMap := make(map[string]bool) // use a map to keep the list unique + + // Gather the info from the allocation set + for _, allocationSet := range server.Spec.AllocationSets { label := allocationSet.Label - if _, found := m[label]; !found { - m[label] = []string{} - } + for _, storage := range allocationSet.Storage { + node := storage.Name - for nnfNode, _ := range allocationSet.Storage { - m[label] = append(m[label], nnfNode) + // add to the list for that lustre component for each allocationCount + for i := 0; i < storage.AllocationCount; i++ { + components[label] = append(components[label], node) + } + + // add to the unique list of rabbits + if _, found := rabbitMap[node]; !found { + rabbitMap[node] = true + components[nnfNodeKey] = append(components[nnfNodeKey], node) + } } } - return m + return components } // fakeNnfNodeStorage creates an NnfNodeStorage resource filled in with only the fields diff --git a/internal/controller/nnf_clientmount_controller_test.go b/internal/controller/nnf_clientmount_controller_test.go index 4eb2066b..6443d324 100644 --- a/internal/controller/nnf_clientmount_controller_test.go +++ 
b/internal/controller/nnf_clientmount_controller_test.go @@ -30,36 +30,48 @@ var _ = Describe("Clientmount Controller Test", func() { It("It should correctly create a human-readable lustre mapping for Servers ", func() { s := dwsv1alpha2.Servers{ - Status: dwsv1alpha2.ServersStatus{ - AllocationSets: []dwsv1alpha2.ServersStatusAllocationSet{ - {Label: "ost", Storage: map[string]dwsv1alpha2.ServersStatusStorage{ - "rabbit-node-1": dwsv1alpha2.ServersStatusStorage{ - AllocationSize: 123345, - }, - "rabbit-node-2": dwsv1alpha2.ServersStatusStorage{ - AllocationSize: 123345, - }, - }}, - {Label: "mdt", Storage: map[string]dwsv1alpha2.ServersStatusStorage{ - "rabbit-node-3": dwsv1alpha2.ServersStatusStorage{ - AllocationSize: 123345, - }, - "rabbit-node-4": dwsv1alpha2.ServersStatusStorage{ - AllocationSize: 123345, - }, - "rabbit-node-8": dwsv1alpha2.ServersStatusStorage{ - AllocationSize: 123345, - }, - }}, + Spec: dwsv1alpha2.ServersSpec{ + AllocationSets: []dwsv1alpha2.ServersSpecAllocationSet{ + {Label: "ost", Storage: []dwsv1alpha2.ServersSpecStorage{ + {Name: "rabbit-node-1", AllocationCount: 2}, + {Name: "rabbit-node-2", AllocationCount: 1}}, + }, + // throw another OST on rabbit-node-2 + {Label: "ost", Storage: []dwsv1alpha2.ServersSpecStorage{ + {Name: "rabbit-node-2", AllocationCount: 1}}, + }, + {Label: "mdt", Storage: []dwsv1alpha2.ServersSpecStorage{ + {Name: "rabbit-node-3", AllocationCount: 1}, + {Name: "rabbit-node-4", AllocationCount: 1}, + {Name: "rabbit-node-8", AllocationCount: 1}}, + }, + {Label: "mgt", Storage: []dwsv1alpha2.ServersSpecStorage{ + {Name: "rabbit-node-3", AllocationCount: 1}}, + }, + {Label: "mgtmdt", Storage: []dwsv1alpha2.ServersSpecStorage{ + {Name: "rabbit-node-4", AllocationCount: 1}}, + }, }, }, } - m := createLustreMapping(&s) - Expect(m).To(HaveLen(2)) - Expect(m["ost"]).To(HaveLen(2)) - Expect(m["ost"]).Should(ContainElements("rabbit-node-1", "rabbit-node-2")) + Expect(s.Spec.AllocationSets).To(HaveLen(5)) + m := 
getLustreMappingFromServer(&s) + Expect(m).To(HaveLen(5)) // should have keys for 4 lustre components (i.e. ost, mdt, mgt, mgtmdt) + rabbits + + Expect(m["ost"]).To(HaveLen(4)) + Expect(m["ost"]).Should(ContainElements("rabbit-node-1", "rabbit-node-1", "rabbit-node-2", "rabbit-node-2")) + Expect(m["mdt"]).To(HaveLen(3)) Expect(m["mdt"]).Should(ContainElements("rabbit-node-3", "rabbit-node-4", "rabbit-node-8")) + + Expect(m["mgt"]).To(HaveLen(1)) + Expect(m["mgt"]).Should(ContainElements("rabbit-node-3")) + + Expect(m["mgtmdt"]).To(HaveLen(1)) + Expect(m["mgtmdt"]).Should(ContainElements("rabbit-node-4")) + + Expect(m["nnfNode"]).To(HaveLen(5)) + Expect(m["nnfNode"]).Should(ContainElements("rabbit-node-1", "rabbit-node-2", "rabbit-node-3", "rabbit-node-4", "rabbit-node-8")) }) }) diff --git a/internal/controller/nnf_storage_controller.go b/internal/controller/nnf_storage_controller.go index d9cd6e3e..a6a506b1 100644 --- a/internal/controller/nnf_storage_controller.go +++ b/internal/controller/nnf_storage_controller.go @@ -192,6 +192,18 @@ func (r *NnfStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) } } + // Collect the lists of nodes for each lustre component used for the filesystem + if storage.Spec.FileSystemType == "lustre" { + components := getLustreMappingFromStorage(storage) + storage.Status.LustreComponents = nnfv1alpha4.NnfStorageLustreComponents{ + MDTs: components["mdt"], + MGTs: components["mgt"], + MGTMDTs: components["mgtmdt"], + OSTs: components["ost"], + NNFNodes: components["nnfNode"], + } + } + // For each allocation, create the NnfNodeStorage resources to fan out to the Rabbit nodes for i, allocationSet := range storage.Spec.AllocationSets { // Add a reference to the external MGS PersistentStorageInstance if necessary @@ -639,6 +651,7 @@ func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *n nnfNodeStorage.Spec.LustreStorage.TargetType = allocationSet.TargetType 
nnfNodeStorage.Spec.LustreStorage.FileSystemName = storage.Status.FileSystemName nnfNodeStorage.Spec.LustreStorage.MgsAddress = storage.Status.MgsAddress + nnfNodeStorage.Spec.LustreStorage.LustreComponents = storage.Status.LustreComponents // If this isn't the first allocation, then change MGTMDT to MDT so that we only get a single MGT if allocationSet.TargetType == "mgtmdt" && startIndex != 0 { @@ -1272,6 +1285,42 @@ func (r *NnfStorageReconciler) getLustreOST0(ctx context.Context, storage *nnfv1 return nil, nil } +// Go through the Storage's allocation sets to determine the number of Lustre components and rabbit +// nodes. Returns a map with keys for each lustre component type and also the nnf nodes involved. +// The list of nnf nodes is kept unique, but mdts, osts, etc can include a node multiple times. +func getLustreMappingFromStorage(storage *nnfv1alpha4.NnfStorage) map[string][]string { + nnfNodeKey := "nnfNode" + componentMap := map[string][]string{ + "mdt": {}, + "mgt": {}, + "mgtmdt": {}, + "ost": {}, + nnfNodeKey: {}, + } + rabbitMap := make(map[string]bool) // use a map to keep the list unique + + // Gather the info from the allocation set + for _, allocationSet := range storage.Spec.AllocationSets { + name := allocationSet.Name + for _, storage := range allocationSet.Nodes { + node := storage.Name + + // add to the list for that lustre component for each Count + for i := 0; i < storage.Count; i++ { + componentMap[name] = append(componentMap[name], node) + } + + // add to the unique list of rabbits + if _, found := rabbitMap[node]; !found { + rabbitMap[node] = true + componentMap[nnfNodeKey] = append(componentMap[nnfNodeKey], node) + } + } + } + + return componentMap +} + // SetupWithManager sets up the controller with the Manager. 
func (r *NnfStorageReconciler) SetupWithManager(mgr ctrl.Manager) error { r.ChildObjects = []dwsv1alpha2.ObjectList{ diff --git a/internal/controller/nnf_storage_controller_test.go b/internal/controller/nnf_storage_controller_test.go new file mode 100644 index 00000000..c31bc80b --- /dev/null +++ b/internal/controller/nnf_storage_controller_test.go @@ -0,0 +1,77 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package controller + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + nnfv1alpha4 "github.com/NearNodeFlash/nnf-sos/api/v1alpha4" +) + +var _ = Describe("NNFStorage Controller Test", func() { + + It("It should correctly create a human-readable lustre mapping for NnfStorage", func() { + s := nnfv1alpha4.NnfStorage{ + Spec: nnfv1alpha4.NnfStorageSpec{ + AllocationSets: []nnfv1alpha4.NnfStorageAllocationSetSpec{ + {Name: "ost", Nodes: []nnfv1alpha4.NnfStorageAllocationNodes{ + {Name: "rabbit-node-1", Count: 2}, + {Name: "rabbit-node-2", Count: 1}}, + }, + // throw another OST on rabbit-node-2 + {Name: "ost", Nodes: []nnfv1alpha4.NnfStorageAllocationNodes{ + {Name: "rabbit-node-2", Count: 1}}, + }, + {Name: "mdt", Nodes: []nnfv1alpha4.NnfStorageAllocationNodes{ + {Name: "rabbit-node-3", Count: 1}, + {Name: "rabbit-node-4", Count: 1}, + {Name: "rabbit-node-8", Count: 1}}, + }, + {Name: "mgt", Nodes: []nnfv1alpha4.NnfStorageAllocationNodes{ + {Name: "rabbit-node-3", Count: 1}}, + }, + {Name: "mgtmdt", Nodes: []nnfv1alpha4.NnfStorageAllocationNodes{ + {Name: "rabbit-node-4", Count: 1}}, + }, + }, + }, + } + + Expect(s.Spec.AllocationSets).To(HaveLen(5)) + m := getLustreMappingFromStorage(&s) + Expect(m).To(HaveLen(5)) // should have keys for 4 lustre components (i.e. 
ost, mdt, mgt, mgtmdt) + rabbits + + Expect(m["ost"]).To(HaveLen(4)) + Expect(m["ost"]).Should(ContainElements("rabbit-node-1", "rabbit-node-1", "rabbit-node-2", "rabbit-node-2")) + + Expect(m["mdt"]).To(HaveLen(3)) + Expect(m["mdt"]).Should(ContainElements("rabbit-node-3", "rabbit-node-4", "rabbit-node-8")) + + Expect(m["mgt"]).To(HaveLen(1)) + Expect(m["mgt"]).Should(ContainElements("rabbit-node-3")) + + Expect(m["mgtmdt"]).To(HaveLen(1)) + Expect(m["mgtmdt"]).Should(ContainElements("rabbit-node-4")) + + Expect(m["nnfNode"]).To(HaveLen(5)) + Expect(m["nnfNode"]).Should(ContainElements("rabbit-node-1", "rabbit-node-2", "rabbit-node-3", "rabbit-node-4", "rabbit-node-8")) + }) +}) From e03b38f0d34153c2f16f532beb57bc984099aeec Mon Sep 17 00:00:00 2001 From: Dean Roehrich Date: Mon, 9 Dec 2024 13:52:38 -0600 Subject: [PATCH 21/23] Revendor lustre-fs-operator and nnf-ec (#427) Signed-off-by: Dean Roehrich --- go.mod | 4 ++-- go.sum | 8 ++++---- vendor/modules.txt | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index edb41aea..cca0a9e9 100644 --- a/go.mod +++ b/go.mod @@ -4,8 +4,8 @@ go 1.21 require ( github.com/DataWorkflowServices/dws v0.0.1-0.20241029172011-d5898d0b8640 - github.com/NearNodeFlash/lustre-fs-operator v0.0.1-0.20240925185149-26d9d6071a1c - github.com/NearNodeFlash/nnf-ec v0.0.1-0.20241017152925-afc4d0cf1a4b + github.com/NearNodeFlash/lustre-fs-operator v0.0.1-0.20241209183639-2d8fdbd63dec + github.com/NearNodeFlash/nnf-ec v0.0.1-0.20241205165035-51a536434b0d github.com/ghodss/yaml v1.0.0 github.com/go-logr/logr v1.4.1 github.com/google/go-cmp v0.6.0 diff --git a/go.sum b/go.sum index 7bb263ea..057bebf0 100644 --- a/go.sum +++ b/go.sum @@ -4,10 +4,10 @@ github.com/DataWorkflowServices/dws v0.0.1-0.20241029172011-d5898d0b8640 h1:JSjg github.com/DataWorkflowServices/dws v0.0.1-0.20241029172011-d5898d0b8640/go.mod h1:6MrEEHISskyooSKcKU6R3mFqH6Yh6KzWgajhcw2s+nM= github.com/HewlettPackard/structex v1.0.4 
h1:RVTdN5FWhDWr1IkjllU8wxuLjISo4gr6u5ryZpzyHcA= github.com/HewlettPackard/structex v1.0.4/go.mod h1:3frC4RY/cPsP/4+N8rkxsNAGlQwHV+zDC7qvrN+N+rE= -github.com/NearNodeFlash/lustre-fs-operator v0.0.1-0.20240925185149-26d9d6071a1c h1:fSuMz3j8UzlYZI59Ded8XuUjYd7C5IyLB55jwgSTIew= -github.com/NearNodeFlash/lustre-fs-operator v0.0.1-0.20240925185149-26d9d6071a1c/go.mod h1:3wENUqk1b7V0q5L5kNQ2ZE3z/NywL4sqXqVYolsiJ94= -github.com/NearNodeFlash/nnf-ec v0.0.1-0.20241017152925-afc4d0cf1a4b h1:Foz6dsOk49tPimuKOxHAijX2BPuLxLH3Z+IegBMKHsU= -github.com/NearNodeFlash/nnf-ec v0.0.1-0.20241017152925-afc4d0cf1a4b/go.mod h1:oxdwMqfttOF9dabJhqrWlirCnMk8/8eyLMwl+hducjk= +github.com/NearNodeFlash/lustre-fs-operator v0.0.1-0.20241209183639-2d8fdbd63dec h1:LPeWeG5xeqixm1YfE26jY5mbhFS8jNkErrI+WmQLFgg= +github.com/NearNodeFlash/lustre-fs-operator v0.0.1-0.20241209183639-2d8fdbd63dec/go.mod h1:3JGfBMIfipAZbbAAesSvKzGmKGAh2Wu6pPGQWMP2f8w= +github.com/NearNodeFlash/nnf-ec v0.0.1-0.20241205165035-51a536434b0d h1:s+zaQp8959Z1KjAo/zRP05CRJUrHTKx2ItbR4C3ffQw= +github.com/NearNodeFlash/nnf-ec v0.0.1-0.20241205165035-51a536434b0d/go.mod h1:oxdwMqfttOF9dabJhqrWlirCnMk8/8eyLMwl+hducjk= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= diff --git a/vendor/modules.txt b/vendor/modules.txt index d14d11a7..e514b18b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -11,12 +11,12 @@ github.com/DataWorkflowServices/dws/utils/updater # github.com/HewlettPackard/structex v1.0.4 ## explicit; go 1.14 github.com/HewlettPackard/structex -# github.com/NearNodeFlash/lustre-fs-operator v0.0.1-0.20240925185149-26d9d6071a1c +# github.com/NearNodeFlash/lustre-fs-operator v0.0.1-0.20241209183639-2d8fdbd63dec ## explicit; go 1.21 
github.com/NearNodeFlash/lustre-fs-operator/api/v1beta1 github.com/NearNodeFlash/lustre-fs-operator/config/crd/bases github.com/NearNodeFlash/lustre-fs-operator/config/webhook -# github.com/NearNodeFlash/nnf-ec v0.0.1-0.20241017152925-afc4d0cf1a4b +# github.com/NearNodeFlash/nnf-ec v0.0.1-0.20241205165035-51a536434b0d ## explicit; go 1.19 github.com/NearNodeFlash/nnf-ec/internal/switchtec/pkg/nvme github.com/NearNodeFlash/nnf-ec/internal/switchtec/pkg/switchtec From 4f77b0ceeedc39edc6167da20f1a86e309a493ca Mon Sep 17 00:00:00 2001 From: Dean Roehrich Date: Mon, 9 Dec 2024 15:24:34 -0600 Subject: [PATCH 22/23] Update nnf-mfu release references Signed-off-by: Dean Roehrich --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 75873b42..3c1b6cef 100644 --- a/Makefile +++ b/Makefile @@ -53,7 +53,7 @@ IMAGE_TAG_BASE ?= ghcr.io/nearnodeflash/nnf-sos # The NNF-MFU container image to use in NNFContainerProfile resources. NNFMFU_TAG_BASE ?= ghcr.io/nearnodeflash/nnf-mfu -NNFMFU_VERSION ?= 0.1.3 +NNFMFU_VERSION ?= 0.1.4 # BUNDLE_IMG defines the image:tag used for the bundle. # You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=/:) From ea3c5253eed60d8b6f6dad5d3faad24fd1657fed Mon Sep 17 00:00:00 2001 From: Dean Roehrich Date: Mon, 9 Dec 2024 15:24:38 -0600 Subject: [PATCH 23/23] Update own release references Signed-off-by: Dean Roehrich --- config/manager/kustomization.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index a1fbc9f1..a3a67fda 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -18,4 +18,4 @@ kind: Kustomization images: - name: controller newName: ghcr.io/nearnodeflash/nnf-sos - newTag: 0.1.16 + newTag: 0.1.17