diff --git a/api/v1alpha1/zz_generated.conversion.go b/api/v1alpha1/zz_generated.conversion.go index 5a6ab732d..d4f281229 100644 --- a/api/v1alpha1/zz_generated.conversion.go +++ b/api/v1alpha1/zz_generated.conversion.go @@ -28,6 +28,7 @@ import ( unsafe "unsafe" v1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + apiv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" v1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" v2beta1 "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1" v1 "k8s.io/api/core/v1" @@ -960,7 +961,7 @@ func Convert_v1alpha3_NnfAccessList_To_v1alpha1_NnfAccessList(in *v1alpha3.NnfAc func autoConvert_v1alpha1_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(in *NnfAccessSpec, out *v1alpha3.NnfAccessSpec, s conversion.Scope) error { out.DesiredState = in.DesiredState - out.TeardownState = v1alpha2.WorkflowState(in.TeardownState) + out.TeardownState = apiv1alpha3.WorkflowState(in.TeardownState) out.Target = in.Target out.UserID = in.UserID out.GroupID = in.GroupID @@ -999,7 +1000,8 @@ func Convert_v1alpha3_NnfAccessSpec_To_v1alpha1_NnfAccessSpec(in *v1alpha3.NnfAc func autoConvert_v1alpha1_NnfAccessStatus_To_v1alpha3_NnfAccessStatus(in *NnfAccessStatus, out *v1alpha3.NnfAccessStatus, s conversion.Scope) error { out.State = in.State out.Ready = in.Ready - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert v1alpha2.ResourceError to apiv1alpha3.ResourceError + compileErrorOnMissingConversion() return nil } @@ -1011,7 +1013,8 @@ func Convert_v1alpha1_NnfAccessStatus_To_v1alpha3_NnfAccessStatus(in *NnfAccessS func autoConvert_v1alpha3_NnfAccessStatus_To_v1alpha1_NnfAccessStatus(in *v1alpha3.NnfAccessStatus, out *NnfAccessStatus, s conversion.Scope) error { out.State = in.State out.Ready = in.Ready - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert apiv1alpha3.ResourceError to v1alpha2.ResourceError + compileErrorOnMissingConversion() return nil } @@ -1508,7 
+1511,8 @@ func autoConvert_v1alpha1_NnfDataMovementStatus_To_v1alpha3_NnfDataMovementStatu out.EndTime = (*metav1.MicroTime)(unsafe.Pointer(in.EndTime)) out.Restarts = in.Restarts out.CommandStatus = (*v1alpha3.NnfDataMovementCommandStatus)(unsafe.Pointer(in.CommandStatus)) - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert v1alpha2.ResourceError to apiv1alpha3.ResourceError + compileErrorOnMissingConversion() return nil } @@ -1525,7 +1529,8 @@ func autoConvert_v1alpha3_NnfDataMovementStatus_To_v1alpha1_NnfDataMovementStatu out.EndTime = (*metav1.MicroTime)(unsafe.Pointer(in.EndTime)) out.Restarts = in.Restarts out.CommandStatus = (*NnfDataMovementCommandStatus)(unsafe.Pointer(in.CommandStatus)) - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert apiv1alpha3.ResourceError to v1alpha2.ResourceError + compileErrorOnMissingConversion() return nil } @@ -1655,7 +1660,8 @@ func Convert_v1alpha3_NnfLustreMGTSpec_To_v1alpha1_NnfLustreMGTSpec(in *v1alpha3 func autoConvert_v1alpha1_NnfLustreMGTStatus_To_v1alpha3_NnfLustreMGTStatus(in *NnfLustreMGTStatus, out *v1alpha3.NnfLustreMGTStatus, s conversion.Scope) error { out.FsNameNext = in.FsNameNext out.ClaimList = *(*[]v1alpha3.NnfLustreMGTStatusClaim)(unsafe.Pointer(&in.ClaimList)) - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert v1alpha2.ResourceError to apiv1alpha3.ResourceError + compileErrorOnMissingConversion() return nil } @@ -1667,7 +1673,8 @@ func Convert_v1alpha1_NnfLustreMGTStatus_To_v1alpha3_NnfLustreMGTStatus(in *NnfL func autoConvert_v1alpha3_NnfLustreMGTStatus_To_v1alpha1_NnfLustreMGTStatus(in *v1alpha3.NnfLustreMGTStatus, out *NnfLustreMGTStatus, s conversion.Scope) error { out.FsNameNext = in.FsNameNext out.ClaimList = *(*[]NnfLustreMGTStatusClaim)(unsafe.Pointer(&in.ClaimList)) - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert apiv1alpha3.ResourceError 
to v1alpha2.ResourceError + compileErrorOnMissingConversion() return nil } @@ -1902,7 +1909,8 @@ func Convert_v1alpha3_NnfNodeBlockStorageSpec_To_v1alpha1_NnfNodeBlockStorageSpe func autoConvert_v1alpha1_NnfNodeBlockStorageStatus_To_v1alpha3_NnfNodeBlockStorageStatus(in *NnfNodeBlockStorageStatus, out *v1alpha3.NnfNodeBlockStorageStatus, s conversion.Scope) error { out.Allocations = *(*[]v1alpha3.NnfNodeBlockStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert v1alpha2.ResourceError to apiv1alpha3.ResourceError + compileErrorOnMissingConversion() out.PodStartTime = in.PodStartTime out.Ready = in.Ready return nil @@ -1915,7 +1923,8 @@ func Convert_v1alpha1_NnfNodeBlockStorageStatus_To_v1alpha3_NnfNodeBlockStorageS func autoConvert_v1alpha3_NnfNodeBlockStorageStatus_To_v1alpha1_NnfNodeBlockStorageStatus(in *v1alpha3.NnfNodeBlockStorageStatus, out *NnfNodeBlockStorageStatus, s conversion.Scope) error { out.Allocations = *(*[]NnfNodeBlockStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert apiv1alpha3.ResourceError to v1alpha2.ResourceError + compileErrorOnMissingConversion() out.PodStartTime = in.PodStartTime out.Ready = in.Ready return nil @@ -2217,7 +2226,8 @@ func Convert_v1alpha3_NnfNodeStorageSpec_To_v1alpha1_NnfNodeStorageSpec(in *v1al func autoConvert_v1alpha1_NnfNodeStorageStatus_To_v1alpha3_NnfNodeStorageStatus(in *NnfNodeStorageStatus, out *v1alpha3.NnfNodeStorageStatus, s conversion.Scope) error { out.Allocations = *(*[]v1alpha3.NnfNodeStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) out.Ready = in.Ready - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert v1alpha2.ResourceError to apiv1alpha3.ResourceError + compileErrorOnMissingConversion() return nil } @@ -2229,7 +2239,8 @@ func 
Convert_v1alpha1_NnfNodeStorageStatus_To_v1alpha3_NnfNodeStorageStatus(in * func autoConvert_v1alpha3_NnfNodeStorageStatus_To_v1alpha1_NnfNodeStorageStatus(in *v1alpha3.NnfNodeStorageStatus, out *NnfNodeStorageStatus, s conversion.Scope) error { out.Allocations = *(*[]NnfNodeStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) out.Ready = in.Ready - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert apiv1alpha3.ResourceError to v1alpha2.ResourceError + compileErrorOnMissingConversion() return nil } @@ -3067,7 +3078,8 @@ func autoConvert_v1alpha1_NnfStorageStatus_To_v1alpha3_NnfStorageStatus(in *NnfS return err } out.AllocationSets = *(*[]v1alpha3.NnfStorageAllocationSetStatus)(unsafe.Pointer(&in.AllocationSets)) - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert v1alpha2.ResourceError to apiv1alpha3.ResourceError + compileErrorOnMissingConversion() out.Ready = in.Ready return nil } @@ -3082,7 +3094,8 @@ func autoConvert_v1alpha3_NnfStorageStatus_To_v1alpha1_NnfStorageStatus(in *v1al return err } out.AllocationSets = *(*[]NnfStorageAllocationSetStatus)(unsafe.Pointer(&in.AllocationSets)) - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert apiv1alpha3.ResourceError to v1alpha2.ResourceError + compileErrorOnMissingConversion() out.Ready = in.Ready return nil } @@ -3206,7 +3219,8 @@ func autoConvert_v1alpha3_NnfSystemStorageSpec_To_v1alpha1_NnfSystemStorageSpec( func autoConvert_v1alpha1_NnfSystemStorageStatus_To_v1alpha3_NnfSystemStorageStatus(in *NnfSystemStorageStatus, out *v1alpha3.NnfSystemStorageStatus, s conversion.Scope) error { out.Ready = in.Ready - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert v1alpha2.ResourceError to apiv1alpha3.ResourceError + compileErrorOnMissingConversion() return nil } @@ -3217,7 +3231,8 @@ func Convert_v1alpha1_NnfSystemStorageStatus_To_v1alpha3_NnfSystemStorageStatus( func 
autoConvert_v1alpha3_NnfSystemStorageStatus_To_v1alpha1_NnfSystemStorageStatus(in *v1alpha3.NnfSystemStorageStatus, out *NnfSystemStorageStatus, s conversion.Scope) error { out.Ready = in.Ready - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert apiv1alpha3.ResourceError to v1alpha2.ResourceError + compileErrorOnMissingConversion() return nil } diff --git a/api/v1alpha2/zz_generated.conversion.go b/api/v1alpha2/zz_generated.conversion.go index 31e804b18..cb74cb81d 100644 --- a/api/v1alpha2/zz_generated.conversion.go +++ b/api/v1alpha2/zz_generated.conversion.go @@ -28,6 +28,7 @@ import ( unsafe "unsafe" apiv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + apiv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" v1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" v2beta1 "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1" v1 "k8s.io/api/core/v1" @@ -960,7 +961,7 @@ func Convert_v1alpha3_NnfAccessList_To_v1alpha2_NnfAccessList(in *v1alpha3.NnfAc func autoConvert_v1alpha2_NnfAccessSpec_To_v1alpha3_NnfAccessSpec(in *NnfAccessSpec, out *v1alpha3.NnfAccessSpec, s conversion.Scope) error { out.DesiredState = in.DesiredState - out.TeardownState = apiv1alpha2.WorkflowState(in.TeardownState) + out.TeardownState = apiv1alpha3.WorkflowState(in.TeardownState) out.Target = in.Target out.UserID = in.UserID out.GroupID = in.GroupID @@ -999,7 +1000,8 @@ func Convert_v1alpha3_NnfAccessSpec_To_v1alpha2_NnfAccessSpec(in *v1alpha3.NnfAc func autoConvert_v1alpha2_NnfAccessStatus_To_v1alpha3_NnfAccessStatus(in *NnfAccessStatus, out *v1alpha3.NnfAccessStatus, s conversion.Scope) error { out.State = in.State out.Ready = in.Ready - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert apiv1alpha2.ResourceError to apiv1alpha3.ResourceError + compileErrorOnMissingConversion() return nil } @@ -1011,7 +1013,8 @@ func Convert_v1alpha2_NnfAccessStatus_To_v1alpha3_NnfAccessStatus(in *NnfAccessS 
func autoConvert_v1alpha3_NnfAccessStatus_To_v1alpha2_NnfAccessStatus(in *v1alpha3.NnfAccessStatus, out *NnfAccessStatus, s conversion.Scope) error { out.State = in.State out.Ready = in.Ready - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert apiv1alpha3.ResourceError to apiv1alpha2.ResourceError + compileErrorOnMissingConversion() return nil } @@ -1508,7 +1511,8 @@ func autoConvert_v1alpha2_NnfDataMovementStatus_To_v1alpha3_NnfDataMovementStatu out.EndTime = (*metav1.MicroTime)(unsafe.Pointer(in.EndTime)) out.Restarts = in.Restarts out.CommandStatus = (*v1alpha3.NnfDataMovementCommandStatus)(unsafe.Pointer(in.CommandStatus)) - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert apiv1alpha2.ResourceError to apiv1alpha3.ResourceError + compileErrorOnMissingConversion() return nil } @@ -1525,7 +1529,8 @@ func autoConvert_v1alpha3_NnfDataMovementStatus_To_v1alpha2_NnfDataMovementStatu out.EndTime = (*metav1.MicroTime)(unsafe.Pointer(in.EndTime)) out.Restarts = in.Restarts out.CommandStatus = (*NnfDataMovementCommandStatus)(unsafe.Pointer(in.CommandStatus)) - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert apiv1alpha3.ResourceError to apiv1alpha2.ResourceError + compileErrorOnMissingConversion() return nil } @@ -1655,7 +1660,8 @@ func Convert_v1alpha3_NnfLustreMGTSpec_To_v1alpha2_NnfLustreMGTSpec(in *v1alpha3 func autoConvert_v1alpha2_NnfLustreMGTStatus_To_v1alpha3_NnfLustreMGTStatus(in *NnfLustreMGTStatus, out *v1alpha3.NnfLustreMGTStatus, s conversion.Scope) error { out.FsNameNext = in.FsNameNext out.ClaimList = *(*[]v1alpha3.NnfLustreMGTStatusClaim)(unsafe.Pointer(&in.ClaimList)) - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert apiv1alpha2.ResourceError to apiv1alpha3.ResourceError + compileErrorOnMissingConversion() return nil } @@ -1667,7 +1673,8 @@ func 
Convert_v1alpha2_NnfLustreMGTStatus_To_v1alpha3_NnfLustreMGTStatus(in *NnfL func autoConvert_v1alpha3_NnfLustreMGTStatus_To_v1alpha2_NnfLustreMGTStatus(in *v1alpha3.NnfLustreMGTStatus, out *NnfLustreMGTStatus, s conversion.Scope) error { out.FsNameNext = in.FsNameNext out.ClaimList = *(*[]NnfLustreMGTStatusClaim)(unsafe.Pointer(&in.ClaimList)) - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert apiv1alpha3.ResourceError to apiv1alpha2.ResourceError + compileErrorOnMissingConversion() return nil } @@ -1902,7 +1909,8 @@ func Convert_v1alpha3_NnfNodeBlockStorageSpec_To_v1alpha2_NnfNodeBlockStorageSpe func autoConvert_v1alpha2_NnfNodeBlockStorageStatus_To_v1alpha3_NnfNodeBlockStorageStatus(in *NnfNodeBlockStorageStatus, out *v1alpha3.NnfNodeBlockStorageStatus, s conversion.Scope) error { out.Allocations = *(*[]v1alpha3.NnfNodeBlockStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert apiv1alpha2.ResourceError to apiv1alpha3.ResourceError + compileErrorOnMissingConversion() out.PodStartTime = in.PodStartTime out.Ready = in.Ready return nil @@ -1915,7 +1923,8 @@ func Convert_v1alpha2_NnfNodeBlockStorageStatus_To_v1alpha3_NnfNodeBlockStorageS func autoConvert_v1alpha3_NnfNodeBlockStorageStatus_To_v1alpha2_NnfNodeBlockStorageStatus(in *v1alpha3.NnfNodeBlockStorageStatus, out *NnfNodeBlockStorageStatus, s conversion.Scope) error { out.Allocations = *(*[]NnfNodeBlockStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert apiv1alpha3.ResourceError to apiv1alpha2.ResourceError + compileErrorOnMissingConversion() out.PodStartTime = in.PodStartTime out.Ready = in.Ready return nil @@ -2217,7 +2226,8 @@ func Convert_v1alpha3_NnfNodeStorageSpec_To_v1alpha2_NnfNodeStorageSpec(in *v1al func 
autoConvert_v1alpha2_NnfNodeStorageStatus_To_v1alpha3_NnfNodeStorageStatus(in *NnfNodeStorageStatus, out *v1alpha3.NnfNodeStorageStatus, s conversion.Scope) error { out.Allocations = *(*[]v1alpha3.NnfNodeStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) out.Ready = in.Ready - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert apiv1alpha2.ResourceError to apiv1alpha3.ResourceError + compileErrorOnMissingConversion() return nil } @@ -2229,7 +2239,8 @@ func Convert_v1alpha2_NnfNodeStorageStatus_To_v1alpha3_NnfNodeStorageStatus(in * func autoConvert_v1alpha3_NnfNodeStorageStatus_To_v1alpha2_NnfNodeStorageStatus(in *v1alpha3.NnfNodeStorageStatus, out *NnfNodeStorageStatus, s conversion.Scope) error { out.Allocations = *(*[]NnfNodeStorageAllocationStatus)(unsafe.Pointer(&in.Allocations)) out.Ready = in.Ready - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert apiv1alpha3.ResourceError to apiv1alpha2.ResourceError + compileErrorOnMissingConversion() return nil } @@ -3067,7 +3078,8 @@ func autoConvert_v1alpha2_NnfStorageStatus_To_v1alpha3_NnfStorageStatus(in *NnfS return err } out.AllocationSets = *(*[]v1alpha3.NnfStorageAllocationSetStatus)(unsafe.Pointer(&in.AllocationSets)) - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert apiv1alpha2.ResourceError to apiv1alpha3.ResourceError + compileErrorOnMissingConversion() out.Ready = in.Ready return nil } @@ -3082,7 +3094,8 @@ func autoConvert_v1alpha3_NnfStorageStatus_To_v1alpha2_NnfStorageStatus(in *v1al return err } out.AllocationSets = *(*[]NnfStorageAllocationSetStatus)(unsafe.Pointer(&in.AllocationSets)) - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert apiv1alpha3.ResourceError to apiv1alpha2.ResourceError + compileErrorOnMissingConversion() out.Ready = in.Ready return nil } @@ -3192,7 +3205,8 @@ func 
Convert_v1alpha3_NnfSystemStorageSpec_To_v1alpha2_NnfSystemStorageSpec(in * func autoConvert_v1alpha2_NnfSystemStorageStatus_To_v1alpha3_NnfSystemStorageStatus(in *NnfSystemStorageStatus, out *v1alpha3.NnfSystemStorageStatus, s conversion.Scope) error { out.Ready = in.Ready - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert apiv1alpha2.ResourceError to apiv1alpha3.ResourceError + compileErrorOnMissingConversion() return nil } @@ -3203,7 +3217,8 @@ func Convert_v1alpha2_NnfSystemStorageStatus_To_v1alpha3_NnfSystemStorageStatus( func autoConvert_v1alpha3_NnfSystemStorageStatus_To_v1alpha2_NnfSystemStorageStatus(in *v1alpha3.NnfSystemStorageStatus, out *NnfSystemStorageStatus, s conversion.Scope) error { out.Ready = in.Ready - out.ResourceError = in.ResourceError + // FIXME: Provide conversion function to convert apiv1alpha3.ResourceError to apiv1alpha2.ResourceError + compileErrorOnMissingConversion() return nil } diff --git a/api/v1alpha3/nnf_resource_status_type.go b/api/v1alpha3/nnf_resource_status_type.go index 50d6e244b..523683c01 100644 --- a/api/v1alpha3/nnf_resource_status_type.go +++ b/api/v1alpha3/nnf_resource_status_type.go @@ -20,7 +20,7 @@ package v1alpha3 import ( - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" sf "github.com/NearNodeFlash/nnf-ec/pkg/rfsf/pkg/models" ) @@ -95,22 +95,22 @@ func (rst NnfResourceStatusType) UpdateIfWorseThan(status *NnfResourceStatusType } } -func (rst NnfResourceStatusType) ConvertToDWSResourceStatus() dwsv1alpha2.ResourceStatus { +func (rst NnfResourceStatusType) ConvertToDWSResourceStatus() dwsv1alpha3.ResourceStatus { switch rst { case ResourceStarting: - return dwsv1alpha2.StartingStatus + return dwsv1alpha3.StartingStatus case ResourceReady: - return dwsv1alpha2.ReadyStatus + return dwsv1alpha3.ReadyStatus case ResourceDisabled: - return dwsv1alpha2.DisabledStatus + return 
dwsv1alpha3.DisabledStatus case ResourceNotPresent: - return dwsv1alpha2.NotPresentStatus + return dwsv1alpha3.NotPresentStatus case ResourceOffline: - return dwsv1alpha2.OfflineStatus + return dwsv1alpha3.OfflineStatus case ResourceFailed: - return dwsv1alpha2.FailedStatus + return dwsv1alpha3.FailedStatus default: - return dwsv1alpha2.UnknownStatus + return dwsv1alpha3.UnknownStatus } } diff --git a/api/v1alpha3/nnfaccess_types.go b/api/v1alpha3/nnfaccess_types.go index dcf9b38d7..ffab249eb 100644 --- a/api/v1alpha3/nnfaccess_types.go +++ b/api/v1alpha3/nnfaccess_types.go @@ -20,7 +20,7 @@ package v1alpha3 import ( - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" "github.com/DataWorkflowServices/dws/utils/updater" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -37,7 +37,7 @@ type NnfAccessSpec struct { // be torn down and deleted. // +kubebuilder:validation:Enum:=PreRun;PostRun;Teardown // +kubebuilder:validation:Type:=string - TeardownState dwsv1alpha2.WorkflowState `json:"teardownState"` + TeardownState dwsv1alpha3.WorkflowState `json:"teardownState"` // Target specifies which storage targets the client should mount // - single: Only one of the storage the client can access @@ -82,7 +82,7 @@ type NnfAccessStatus struct { // Ready signifies whether status.state has been achieved Ready bool `json:"ready"` - dwsv1alpha2.ResourceError `json:",inline"` + dwsv1alpha3.ResourceError `json:",inline"` } //+kubebuilder:object:root=true diff --git a/api/v1alpha3/nnfdatamovement_types.go b/api/v1alpha3/nnfdatamovement_types.go index 0c5342ce1..1002d940b 100644 --- a/api/v1alpha3/nnfdatamovement_types.go +++ b/api/v1alpha3/nnfdatamovement_types.go @@ -20,7 +20,7 @@ package v1alpha3 import ( - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" corev1 "k8s.io/api/core/v1" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -198,7 +198,7 @@ type NnfDataMovementStatus struct { // updates to the Data Movement resource. CommandStatus *NnfDataMovementCommandStatus `json:"commandStatus,omitempty"` - dwsv1alpha2.ResourceError `json:",inline"` + dwsv1alpha3.ResourceError `json:",inline"` } // Types describing the various data movement status conditions. @@ -264,7 +264,7 @@ const ( DataMovementInitiatorLabel = "dm.cray.hpe.com/initiator" ) -func AddDataMovementTeardownStateLabel(object metav1.Object, state dwsv1alpha2.WorkflowState) { +func AddDataMovementTeardownStateLabel(object metav1.Object, state dwsv1alpha3.WorkflowState) { labels := object.GetLabels() if labels == nil { labels = make(map[string]string) diff --git a/api/v1alpha3/nnflustremgt_types.go b/api/v1alpha3/nnflustremgt_types.go index 98fc31417..14a79282d 100644 --- a/api/v1alpha3/nnflustremgt_types.go +++ b/api/v1alpha3/nnflustremgt_types.go @@ -20,7 +20,7 @@ package v1alpha3 import ( - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" "github.com/DataWorkflowServices/dws/utils/updater" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -60,7 +60,7 @@ type NnfLustreMGTStatus struct { // ClaimList is the list of currently in use fsnames ClaimList []NnfLustreMGTStatusClaim `json:"claimList,omitempty"` - dwsv1alpha2.ResourceError `json:",inline"` + dwsv1alpha3.ResourceError `json:",inline"` } type NnfLustreMGTStatusClaim struct { diff --git a/api/v1alpha3/nnfnodeblockstorage_types.go b/api/v1alpha3/nnfnodeblockstorage_types.go index 41806b57e..54a0db9aa 100644 --- a/api/v1alpha3/nnfnodeblockstorage_types.go +++ b/api/v1alpha3/nnfnodeblockstorage_types.go @@ -20,7 +20,7 @@ package v1alpha3 import ( - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" 
"github.com/DataWorkflowServices/dws/utils/updater" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -48,7 +48,7 @@ type NnfNodeBlockStorageStatus struct { // Allocations is the list of storage allocations that were made Allocations []NnfNodeBlockStorageAllocationStatus `json:"allocations,omitempty"` - dwsv1alpha2.ResourceError `json:",inline"` + dwsv1alpha3.ResourceError `json:",inline"` // PodStartTime is the value of pod.status.containerStatuses[].state.running.startedAt from the pod that did // last successful full reconcile of the NnfNodeBlockStorage. This is used to tell whether the /dev paths diff --git a/api/v1alpha3/nnfnodestorage_types.go b/api/v1alpha3/nnfnodestorage_types.go index a977b6774..cf5514ec0 100644 --- a/api/v1alpha3/nnfnodestorage_types.go +++ b/api/v1alpha3/nnfnodestorage_types.go @@ -20,7 +20,7 @@ package v1alpha3 import ( - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" "github.com/DataWorkflowServices/dws/utils/updater" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -97,7 +97,7 @@ type NnfNodeStorageStatus struct { Ready bool `json:"ready,omitempty"` - dwsv1alpha2.ResourceError `json:",inline"` + dwsv1alpha3.ResourceError `json:",inline"` } // NnfNodeStorageAllocationStatus defines the allocation status for each allocation in the NnfNodeStorage diff --git a/api/v1alpha3/nnfstorage_types.go b/api/v1alpha3/nnfstorage_types.go index 841e2e9a8..29cfe5d8a 100644 --- a/api/v1alpha3/nnfstorage_types.go +++ b/api/v1alpha3/nnfstorage_types.go @@ -20,7 +20,7 @@ package v1alpha3 import ( - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" "github.com/DataWorkflowServices/dws/utils/updater" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -133,7 +133,7 @@ type NnfStorageStatus struct { // from 
the spec. AllocationSets []NnfStorageAllocationSetStatus `json:"allocationSets,omitempty"` - dwsv1alpha2.ResourceError `json:",inline"` + dwsv1alpha3.ResourceError `json:",inline"` // Ready reflects the status of this NNF Storage Ready bool `json:"ready,omitempty"` diff --git a/api/v1alpha3/nnfsystemstorage_types.go b/api/v1alpha3/nnfsystemstorage_types.go index cdb0628a1..be9b3ea85 100644 --- a/api/v1alpha3/nnfsystemstorage_types.go +++ b/api/v1alpha3/nnfsystemstorage_types.go @@ -20,7 +20,7 @@ package v1alpha3 import ( - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" "github.com/DataWorkflowServices/dws/utils/updater" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -100,7 +100,7 @@ type NnfSystemStorageStatus struct { // Ready signifies whether all work has been completed Ready bool `json:"ready"` - dwsv1alpha2.ResourceError `json:",inline"` + dwsv1alpha3.ResourceError `json:",inline"` } // +kubebuilder:object:root=true diff --git a/cmd/main.go b/cmd/main.go index 0c6103e9e..1e071fbd0 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -45,7 +45,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" lusv1beta1 "github.com/NearNodeFlash/lustre-fs-operator/api/v1beta1" mpiv2beta1 "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1" @@ -76,7 +76,7 @@ func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(nnfv1alpha1.AddToScheme(scheme)) - utilruntime.Must(dwsv1alpha2.AddToScheme(scheme)) + utilruntime.Must(dwsv1alpha3.AddToScheme(scheme)) utilruntime.Must(lusv1beta1.AddToScheme(scheme)) utilruntime.Must(mpiv2beta1.AddToScheme(scheme)) diff --git a/go.mod b/go.mod index edb41aea4..645dbf1f1 100644 --- a/go.mod +++ b/go.mod @@ -3,7 
+3,7 @@ module github.com/NearNodeFlash/nnf-sos go 1.21 require ( - github.com/DataWorkflowServices/dws v0.0.1-0.20241029172011-d5898d0b8640 + github.com/DataWorkflowServices/dws v0.0.1-0.20241107211258-04c972c3e0b0 github.com/NearNodeFlash/lustre-fs-operator v0.0.1-0.20240925185149-26d9d6071a1c github.com/NearNodeFlash/nnf-ec v0.0.1-0.20241017152925-afc4d0cf1a4b github.com/ghodss/yaml v1.0.0 diff --git a/go.sum b/go.sum index 7bb263ea8..6e1e5cb93 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataWorkflowServices/dws v0.0.1-0.20241029172011-d5898d0b8640 h1:JSjgesWkPo9sAc7QkjWisNDOlIOGR0MQX/hxXL56FTA= -github.com/DataWorkflowServices/dws v0.0.1-0.20241029172011-d5898d0b8640/go.mod h1:6MrEEHISskyooSKcKU6R3mFqH6Yh6KzWgajhcw2s+nM= +github.com/DataWorkflowServices/dws v0.0.1-0.20241107211258-04c972c3e0b0 h1:+WFhwlSfKTkVeNOioZj0A6/ju54gi/+OoWLHZQples0= +github.com/DataWorkflowServices/dws v0.0.1-0.20241107211258-04c972c3e0b0/go.mod h1:6MrEEHISskyooSKcKU6R3mFqH6Yh6KzWgajhcw2s+nM= github.com/HewlettPackard/structex v1.0.4 h1:RVTdN5FWhDWr1IkjllU8wxuLjISo4gr6u5ryZpzyHcA= github.com/HewlettPackard/structex v1.0.4/go.mod h1:3frC4RY/cPsP/4+N8rkxsNAGlQwHV+zDC7qvrN+N+rE= github.com/NearNodeFlash/lustre-fs-operator v0.0.1-0.20240925185149-26d9d6071a1c h1:fSuMz3j8UzlYZI59Ded8XuUjYd7C5IyLB55jwgSTIew= diff --git a/internal/controller/directivebreakdown_controller.go b/internal/controller/directivebreakdown_controller.go index fd3e1dfa2..95b5b1aae 100644 --- a/internal/controller/directivebreakdown_controller.go +++ b/internal/controller/directivebreakdown_controller.go @@ -40,7 +40,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 
"github.com/DataWorkflowServices/dws/api/v1alpha3" "github.com/DataWorkflowServices/dws/utils/dwdparse" "github.com/DataWorkflowServices/dws/utils/updater" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" @@ -66,14 +66,14 @@ type DirectiveBreakdownReconciler struct { client.Client Log logr.Logger Scheme *kruntime.Scheme - ChildObjects []dwsv1alpha2.ObjectList + ChildObjects []dwsv1alpha3.ObjectList } type lustreComponentType struct { - strategy dwsv1alpha2.AllocationStrategy + strategy dwsv1alpha3.AllocationStrategy cap int64 labelsStr string - colocationKey *dwsv1alpha2.AllocationSetColocationConstraint + colocationKey *dwsv1alpha3.AllocationSetColocationConstraint } //+kubebuilder:rbac:groups=dataworkflowservices.github.io,resources=directivebreakdowns,verbs=get;list;watch;create;update;patch;delete @@ -92,13 +92,13 @@ func (r *DirectiveBreakdownReconciler) Reconcile(ctx context.Context, req ctrl.R metrics.NnfDirectiveBreakdownReconcilesTotal.Inc() - dbd := &dwsv1alpha2.DirectiveBreakdown{} + dbd := &dwsv1alpha3.DirectiveBreakdown{} err = r.Get(ctx, req.NamespacedName, dbd) if err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } - statusUpdater := updater.NewStatusUpdater[*dwsv1alpha2.DirectiveBreakdownStatus](dbd) + statusUpdater := updater.NewStatusUpdater[*dwsv1alpha3.DirectiveBreakdownStatus](dbd) defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() defer func() { dbd.Status.SetResourceErrorAndLog(err, log) }() @@ -111,7 +111,7 @@ func (r *DirectiveBreakdownReconciler) Reconcile(ctx context.Context, req ctrl.R } // Delete all children that are owned by this DirectiveBreakdown. 
- deleteStatus, err := dwsv1alpha2.DeleteChildren(ctx, r.Client, r.ChildObjects, dbd) + deleteStatus, err := dwsv1alpha3.DeleteChildren(ctx, r.Client, r.ChildObjects, dbd) if err != nil { return ctrl.Result{}, err } @@ -148,7 +148,7 @@ func (r *DirectiveBreakdownReconciler) Reconcile(ctx context.Context, req ctrl.R argsMap, err := dwdparse.BuildArgsMap(dbd.Spec.Directive) if err != nil { - return ctrl.Result{}, dwsv1alpha2.NewResourceError("invalid DW directive: %s", dbd.Spec.Directive).WithError(err).WithUserMessage("invalid DW directive").WithFatal() + return ctrl.Result{}, dwsv1alpha3.NewResourceError("invalid DW directive: %s", dbd.Spec.Directive).WithError(err).WithUserMessage("invalid DW directive").WithFatal() } commonResourceName, commonResourceNamespace := getStorageReferenceNameFromDBD(dbd) @@ -174,8 +174,8 @@ func (r *DirectiveBreakdownReconciler) Reconcile(ctx context.Context, req ctrl.R return ctrl.Result{}, nil } - dbd.Status.Storage = &dwsv1alpha2.StorageBreakdown{ - Lifetime: dwsv1alpha2.StorageLifetimePersistent, + dbd.Status.Storage = &dwsv1alpha3.StorageBreakdown{ + Lifetime: dwsv1alpha3.StorageLifetimePersistent, Reference: persistentStorage.Status.Servers, } @@ -185,7 +185,7 @@ func (r *DirectiveBreakdownReconciler) Reconcile(ctx context.Context, req ctrl.R } case "persistentdw": // Find the peristentStorageInstance that the persistentdw is referencing - persistentStorage := &dwsv1alpha2.PersistentStorageInstance{ + persistentStorage := &dwsv1alpha3.PersistentStorageInstance{ ObjectMeta: metav1.ObjectMeta{ Name: commonResourceName, Namespace: commonResourceNamespace, @@ -197,7 +197,7 @@ func (r *DirectiveBreakdownReconciler) Reconcile(ctx context.Context, req ctrl.R return ctrl.Result{}, err } - servers := &dwsv1alpha2.Servers{ + servers := &dwsv1alpha3.Servers{ ObjectMeta: metav1.ObjectMeta{ Name: persistentStorage.Status.Servers.Name, Namespace: persistentStorage.Status.Servers.Namespace, @@ -212,12 +212,12 @@ func (r 
*DirectiveBreakdownReconciler) Reconcile(ctx context.Context, req ctrl.R // Create a location constraint for the compute nodes based on what type of file system // the persistent storage is using. - dbd.Status.Compute = &dwsv1alpha2.ComputeBreakdown{ - Constraints: dwsv1alpha2.ComputeConstraints{}, + dbd.Status.Compute = &dwsv1alpha3.ComputeBreakdown{ + Constraints: dwsv1alpha3.ComputeConstraints{}, } for i := range servers.Spec.AllocationSets { - constraint := dwsv1alpha2.ComputeLocationConstraint{ + constraint := dwsv1alpha3.ComputeLocationConstraint{ Reference: v1.ObjectReference{ Kind: persistentStorage.Status.Servers.Kind, Name: persistentStorage.Status.Servers.Name, @@ -228,25 +228,25 @@ func (r *DirectiveBreakdownReconciler) Reconcile(ctx context.Context, req ctrl.R if argsMap["type"] == "lustre" { // Lustre requires a network connection between compute and Rabbit - constraint.Access = append(constraint.Access, dwsv1alpha2.ComputeLocationAccess{ - Type: dwsv1alpha2.ComputeLocationNetwork, - Priority: dwsv1alpha2.ComputeLocationPriorityMandatory, + constraint.Access = append(constraint.Access, dwsv1alpha3.ComputeLocationAccess{ + Type: dwsv1alpha3.ComputeLocationNetwork, + Priority: dwsv1alpha3.ComputeLocationPriorityMandatory, }) } else if argsMap["type"] == "gfs2" { // GFS2 requires both PCIe and network connection between compute and Rabbit - constraint.Access = append(constraint.Access, dwsv1alpha2.ComputeLocationAccess{ - Type: dwsv1alpha2.ComputeLocationNetwork, - Priority: dwsv1alpha2.ComputeLocationPriorityMandatory, + constraint.Access = append(constraint.Access, dwsv1alpha3.ComputeLocationAccess{ + Type: dwsv1alpha3.ComputeLocationNetwork, + Priority: dwsv1alpha3.ComputeLocationPriorityMandatory, }) - constraint.Access = append(constraint.Access, dwsv1alpha2.ComputeLocationAccess{ - Type: dwsv1alpha2.ComputeLocationPhysical, - Priority: dwsv1alpha2.ComputeLocationPriorityMandatory, + constraint.Access = append(constraint.Access, 
dwsv1alpha3.ComputeLocationAccess{ + Type: dwsv1alpha3.ComputeLocationPhysical, + Priority: dwsv1alpha3.ComputeLocationPriorityMandatory, }) } else { // XFS and Raw only require PCIe connection between compute and Rabbit - constraint.Access = append(constraint.Access, dwsv1alpha2.ComputeLocationAccess{ - Type: dwsv1alpha2.ComputeLocationPhysical, - Priority: dwsv1alpha2.ComputeLocationPriorityMandatory, + constraint.Access = append(constraint.Access, dwsv1alpha3.ComputeLocationAccess{ + Type: dwsv1alpha3.ComputeLocationPhysical, + Priority: dwsv1alpha3.ComputeLocationPriorityMandatory, }) } dbd.Status.Compute.Constraints.Location = append(dbd.Status.Compute.Constraints.Location, constraint) @@ -273,13 +273,13 @@ func (r *DirectiveBreakdownReconciler) Reconcile(ctx context.Context, req ctrl.R } serversReference := v1.ObjectReference{ - Kind: reflect.TypeOf(dwsv1alpha2.Servers{}).Name(), + Kind: reflect.TypeOf(dwsv1alpha3.Servers{}).Name(), Name: servers.Name, Namespace: servers.Namespace, } - dbd.Status.Storage = &dwsv1alpha2.StorageBreakdown{ - Lifetime: dwsv1alpha2.StorageLifetimeJob, + dbd.Status.Storage = &dwsv1alpha3.StorageBreakdown{ + Lifetime: dwsv1alpha3.StorageLifetimeJob, Reference: serversReference, } @@ -291,14 +291,14 @@ func (r *DirectiveBreakdownReconciler) Reconcile(ctx context.Context, req ctrl.R // Create a location constraint for the compute nodes based on what type of file system // will be created. 
- dbd.Status.Compute = &dwsv1alpha2.ComputeBreakdown{ - Constraints: dwsv1alpha2.ComputeConstraints{}, + dbd.Status.Compute = &dwsv1alpha3.ComputeBreakdown{ + Constraints: dwsv1alpha3.ComputeConstraints{}, } for i, allocationSet := range dbd.Status.Storage.AllocationSets { - constraint := dwsv1alpha2.ComputeLocationConstraint{ + constraint := dwsv1alpha3.ComputeLocationConstraint{ Reference: v1.ObjectReference{ - Kind: reflect.TypeOf(dwsv1alpha2.Servers{}).Name(), + Kind: reflect.TypeOf(dwsv1alpha3.Servers{}).Name(), Name: servers.Name, Namespace: servers.Namespace, FieldPath: fmt.Sprintf("servers.spec.allocationSets[%d]", i), @@ -307,35 +307,35 @@ func (r *DirectiveBreakdownReconciler) Reconcile(ctx context.Context, req ctrl.R if argsMap["type"] == "lustre" { // Lustre requires a network connection between compute and Rabbit - constraint.Access = append(constraint.Access, dwsv1alpha2.ComputeLocationAccess{ - Type: dwsv1alpha2.ComputeLocationNetwork, - Priority: dwsv1alpha2.ComputeLocationPriorityMandatory, + constraint.Access = append(constraint.Access, dwsv1alpha3.ComputeLocationAccess{ + Type: dwsv1alpha3.ComputeLocationNetwork, + Priority: dwsv1alpha3.ComputeLocationPriorityMandatory, }) // If the "ColocateComputes" option is specified, force the computes to have a // physical connection to the storage to limit their placement targetOptions := pinnedProfile.GetLustreMiscOptions(allocationSet.Label) if targetOptions.ColocateComputes { - constraint.Access = append(constraint.Access, dwsv1alpha2.ComputeLocationAccess{ - Type: dwsv1alpha2.ComputeLocationPhysical, - Priority: dwsv1alpha2.ComputeLocationPriorityBestEffort, + constraint.Access = append(constraint.Access, dwsv1alpha3.ComputeLocationAccess{ + Type: dwsv1alpha3.ComputeLocationPhysical, + Priority: dwsv1alpha3.ComputeLocationPriorityBestEffort, }) } } else if argsMap["type"] == "gfs2" { // GFS2 requires both PCIe and network connection between compute and Rabbit - constraint.Access = 
append(constraint.Access, dwsv1alpha2.ComputeLocationAccess{ - Type: dwsv1alpha2.ComputeLocationNetwork, - Priority: dwsv1alpha2.ComputeLocationPriorityMandatory, + constraint.Access = append(constraint.Access, dwsv1alpha3.ComputeLocationAccess{ + Type: dwsv1alpha3.ComputeLocationNetwork, + Priority: dwsv1alpha3.ComputeLocationPriorityMandatory, }) - constraint.Access = append(constraint.Access, dwsv1alpha2.ComputeLocationAccess{ - Type: dwsv1alpha2.ComputeLocationPhysical, - Priority: dwsv1alpha2.ComputeLocationPriorityMandatory, + constraint.Access = append(constraint.Access, dwsv1alpha3.ComputeLocationAccess{ + Type: dwsv1alpha3.ComputeLocationPhysical, + Priority: dwsv1alpha3.ComputeLocationPriorityMandatory, }) } else { // XFS and Raw only require PCIe connection between compute and Rabbit - constraint.Access = append(constraint.Access, dwsv1alpha2.ComputeLocationAccess{ - Type: dwsv1alpha2.ComputeLocationPhysical, - Priority: dwsv1alpha2.ComputeLocationPriorityMandatory, + constraint.Access = append(constraint.Access, dwsv1alpha3.ComputeLocationAccess{ + Type: dwsv1alpha3.ComputeLocationPhysical, + Priority: dwsv1alpha3.ComputeLocationPriorityMandatory, }) } @@ -350,10 +350,10 @@ func (r *DirectiveBreakdownReconciler) Reconcile(ctx context.Context, req ctrl.R return ctrl.Result{}, nil } -func (r *DirectiveBreakdownReconciler) createOrUpdatePersistentStorageInstance(ctx context.Context, dbd *dwsv1alpha2.DirectiveBreakdown, name string, argsMap map[string]string) (*dwsv1alpha2.PersistentStorageInstance, error) { +func (r *DirectiveBreakdownReconciler) createOrUpdatePersistentStorageInstance(ctx context.Context, dbd *dwsv1alpha3.DirectiveBreakdown, name string, argsMap map[string]string) (*dwsv1alpha3.PersistentStorageInstance, error) { log := r.Log.WithValues("DirectiveBreakdown", client.ObjectKeyFromObject(dbd)) - psi := &dwsv1alpha2.PersistentStorageInstance{ + psi := &dwsv1alpha3.PersistentStorageInstance{ ObjectMeta: metav1.ObjectMeta{ Name: name, 
Namespace: dbd.Namespace, @@ -365,14 +365,14 @@ func (r *DirectiveBreakdownReconciler) createOrUpdatePersistentStorageInstance(c // Only set the owner references during the create. The workflow controller // will remove the reference after setup phase has completed if psi.Spec.Name == "" { - dwsv1alpha2.AddOwnerLabels(psi, dbd) + dwsv1alpha3.AddOwnerLabels(psi, dbd) err := ctrl.SetControllerReference(dbd, psi, r.Scheme) if err != nil { return err } } else { if psi.Spec.UserID != dbd.Spec.UserID { - return dwsv1alpha2.NewResourceError("existing persistent storage user ID %v does not match user ID %v", psi.Spec.UserID, dbd.Spec.UserID).WithUserMessage("User ID does not match existing persistent storage").WithFatal().WithUser() + return dwsv1alpha3.NewResourceError("existing persistent storage user ID %v does not match user ID %v", psi.Spec.UserID, dbd.Spec.UserID).WithUserMessage("User ID does not match existing persistent storage").WithFatal().WithUser() } } @@ -380,7 +380,7 @@ func (r *DirectiveBreakdownReconciler) createOrUpdatePersistentStorageInstance(c psi.Spec.FsType = argsMap["type"] psi.Spec.DWDirective = dbd.Spec.Directive psi.Spec.UserID = dbd.Spec.UserID - psi.Spec.State = dwsv1alpha2.PSIStateActive + psi.Spec.State = dwsv1alpha3.PSIStateActive return nil }) @@ -406,10 +406,10 @@ func (r *DirectiveBreakdownReconciler) createOrUpdatePersistentStorageInstance(c return psi, err } -func (r *DirectiveBreakdownReconciler) createServers(ctx context.Context, serversName string, serversNamespace string, dbd *dwsv1alpha2.DirectiveBreakdown) (*dwsv1alpha2.Servers, error) { +func (r *DirectiveBreakdownReconciler) createServers(ctx context.Context, serversName string, serversNamespace string, dbd *dwsv1alpha3.DirectiveBreakdown) (*dwsv1alpha3.Servers, error) { log := r.Log.WithValues("DirectiveBreakdown", client.ObjectKeyFromObject(dbd)) - server := &dwsv1alpha2.Servers{ + server := &dwsv1alpha3.Servers{ ObjectMeta: metav1.ObjectMeta{ Name: serversName, Namespace: 
serversNamespace, @@ -418,8 +418,8 @@ func (r *DirectiveBreakdownReconciler) createServers(ctx context.Context, server result, err := ctrl.CreateOrUpdate(ctx, r.Client, server, func() error { - dwsv1alpha2.InheritParentLabels(server, dbd) - dwsv1alpha2.AddOwnerLabels(server, dbd) + dwsv1alpha3.InheritParentLabels(server, dbd) + dwsv1alpha3.AddOwnerLabels(server, dbd) return ctrl.SetControllerReference(dbd, server, r.Scheme) }) @@ -446,13 +446,13 @@ func (r *DirectiveBreakdownReconciler) createServers(ctx context.Context, server } // populateDirectiveBreakdown parses the #DW to pull out the relevant information for the WLM to see. -func (r *DirectiveBreakdownReconciler) populateStorageBreakdown(ctx context.Context, dbd *dwsv1alpha2.DirectiveBreakdown, commonResourceName string, argsMap map[string]string) error { +func (r *DirectiveBreakdownReconciler) populateStorageBreakdown(ctx context.Context, dbd *dwsv1alpha3.DirectiveBreakdown, commonResourceName string, argsMap map[string]string) error { log := r.Log.WithValues("DirectiveBreakdown", client.ObjectKeyFromObject(dbd)) // The pinned profile will be named for the NnfStorage. nnfStorageProfile, err := findPinnedProfile(ctx, r.Client, dbd.GetNamespace(), commonResourceName) if err != nil { - return dwsv1alpha2.NewResourceError("unable to find pinned NnfStorageProfile: %s/%s", commonResourceName, dbd.GetNamespace()).WithError(err).WithUserMessage("Unable to find pinned NnfStorageProfile").WithMajor() + return dwsv1alpha3.NewResourceError("unable to find pinned NnfStorageProfile: %s/%s", commonResourceName, dbd.GetNamespace()).WithError(err).WithUserMessage("Unable to find pinned NnfStorageProfile").WithMajor() } // The directive has been validated by the webhook, so we can assume the pieces we need are in the map. 
@@ -461,25 +461,25 @@ func (r *DirectiveBreakdownReconciler) populateStorageBreakdown(ctx context.Cont breakdownCapacity, _ := getCapacityInBytes(capacity) if breakdownCapacity == 0 && capacityExists { - return dwsv1alpha2.NewResourceError("").WithUserMessage("'capacity' must be a non-zero value").WithFatal() + return dwsv1alpha3.NewResourceError("").WithUserMessage("'capacity' must be a non-zero value").WithFatal() } // allocationSets represents the result we need to produce. // We build it then check to see if the directiveBreakdown's // AllocationSet matches. If so, we don't change it. - var allocationSets []dwsv1alpha2.StorageAllocationSet + var allocationSets []dwsv1alpha3.StorageAllocationSet // Depending on the #DW's filesystem (#DW type=<>) , we have different work to do switch filesystem { case "raw": scalingFactor, err := strconv.ParseFloat(nnfStorageProfile.Data.RawStorage.CapacityScalingFactor, 64) if err != nil { - return dwsv1alpha2.NewResourceError("").WithError(err).WithUserMessage("invalid capacityScalingFactor for raw allocation").WithFatal() + return dwsv1alpha3.NewResourceError("").WithError(err).WithUserMessage("invalid capacityScalingFactor for raw allocation").WithFatal() } breakdownCapacity = int64(scalingFactor * float64(breakdownCapacity)) - component := dwsv1alpha2.StorageAllocationSet{} - populateStorageAllocationSet(&component, dwsv1alpha2.AllocatePerCompute, breakdownCapacity, 0, 0, nnfStorageProfile.Data.RawStorage.StorageLabels, filesystem, nil) + component := dwsv1alpha3.StorageAllocationSet{} + populateStorageAllocationSet(&component, dwsv1alpha3.AllocatePerCompute, breakdownCapacity, 0, 0, nnfStorageProfile.Data.RawStorage.StorageLabels, filesystem, nil) log.Info("allocationSets", "comp", component) @@ -487,12 +487,12 @@ func (r *DirectiveBreakdownReconciler) populateStorageBreakdown(ctx context.Cont case "xfs": scalingFactor, err := strconv.ParseFloat(nnfStorageProfile.Data.XFSStorage.CapacityScalingFactor, 64) if err != nil { - 
return dwsv1alpha2.NewResourceError("").WithError(err).WithUserMessage("invalid capacityScalingFactor for xfs allocation").WithFatal() + return dwsv1alpha3.NewResourceError("").WithError(err).WithUserMessage("invalid capacityScalingFactor for xfs allocation").WithFatal() } breakdownCapacity = int64(scalingFactor * float64(breakdownCapacity)) - component := dwsv1alpha2.StorageAllocationSet{} - populateStorageAllocationSet(&component, dwsv1alpha2.AllocatePerCompute, breakdownCapacity, 0, 0, nnfStorageProfile.Data.XFSStorage.StorageLabels, filesystem, nil) + component := dwsv1alpha3.StorageAllocationSet{} + populateStorageAllocationSet(&component, dwsv1alpha3.AllocatePerCompute, breakdownCapacity, 0, 0, nnfStorageProfile.Data.XFSStorage.StorageLabels, filesystem, nil) log.Info("allocationSets", "comp", component) @@ -500,12 +500,12 @@ func (r *DirectiveBreakdownReconciler) populateStorageBreakdown(ctx context.Cont case "gfs2": scalingFactor, err := strconv.ParseFloat(nnfStorageProfile.Data.GFS2Storage.CapacityScalingFactor, 64) if err != nil { - return dwsv1alpha2.NewResourceError("").WithError(err).WithUserMessage("invalid capacityScalingFactor for gfs2 allocation").WithFatal() + return dwsv1alpha3.NewResourceError("").WithError(err).WithUserMessage("invalid capacityScalingFactor for gfs2 allocation").WithFatal() } breakdownCapacity = int64(scalingFactor * float64(breakdownCapacity)) - component := dwsv1alpha2.StorageAllocationSet{} - populateStorageAllocationSet(&component, dwsv1alpha2.AllocatePerCompute, breakdownCapacity, 0, 0, nnfStorageProfile.Data.GFS2Storage.StorageLabels, filesystem, nil) + component := dwsv1alpha3.StorageAllocationSet{} + populateStorageAllocationSet(&component, dwsv1alpha3.AllocatePerCompute, breakdownCapacity, 0, 0, nnfStorageProfile.Data.GFS2Storage.StorageLabels, filesystem, nil) log.Info("allocationSets", "comp", component) @@ -513,7 +513,7 @@ func (r *DirectiveBreakdownReconciler) populateStorageBreakdown(ctx context.Cont case 
"lustre": scalingFactor, err := strconv.ParseFloat(nnfStorageProfile.Data.LustreStorage.CapacityScalingFactor, 64) if err != nil { - return dwsv1alpha2.NewResourceError("").WithError(err).WithUserMessage("invalid capacityScalingFactor for lustre allocation").WithFatal() + return dwsv1alpha3.NewResourceError("").WithError(err).WithUserMessage("invalid capacityScalingFactor for lustre allocation").WithFatal() } breakdownCapacity = int64(scalingFactor * float64(breakdownCapacity)) mdtCapacity, _ := getCapacityInBytes(nnfStorageProfile.Data.LustreStorage.CapacityMDT) @@ -522,12 +522,12 @@ func (r *DirectiveBreakdownReconciler) populateStorageBreakdown(ctx context.Cont // We need 3 distinct components for Lustre, ost, mdt, and mgt var lustreComponents []lustreComponentType - lustreComponents = append(lustreComponents, lustreComponentType{dwsv1alpha2.AllocateAcrossServers, breakdownCapacity, "ost", nil}) + lustreComponents = append(lustreComponents, lustreComponentType{dwsv1alpha3.AllocateAcrossServers, breakdownCapacity, "ost", nil}) - mgtKey := &dwsv1alpha2.AllocationSetColocationConstraint{Type: "exclusive", Key: "lustre-mgt"} - var mdtKey *dwsv1alpha2.AllocationSetColocationConstraint + mgtKey := &dwsv1alpha3.AllocationSetColocationConstraint{Type: "exclusive", Key: "lustre-mgt"} + var mdtKey *dwsv1alpha3.AllocationSetColocationConstraint if nnfStorageProfile.Data.LustreStorage.ExclusiveMDT { - mdtKey = &dwsv1alpha2.AllocationSetColocationConstraint{Type: "exclusive"} + mdtKey = &dwsv1alpha3.AllocationSetColocationConstraint{Type: "exclusive"} } if nnfStorageProfile.Data.LustreStorage.CombinedMGTMDT { @@ -536,41 +536,41 @@ func (r *DirectiveBreakdownReconciler) populateStorageBreakdown(ctx context.Cont if mdtKey != nil { useKey = mdtKey } - lustreComponents = append(lustreComponents, lustreComponentType{dwsv1alpha2.AllocateAcrossServers, mdtCapacity, "mgtmdt", useKey}) + lustreComponents = append(lustreComponents, 
lustreComponentType{dwsv1alpha3.AllocateAcrossServers, mdtCapacity, "mgtmdt", useKey}) } else if len(nnfStorageProfile.Data.LustreStorage.ExternalMGS) > 0 { - lustreComponents = append(lustreComponents, lustreComponentType{dwsv1alpha2.AllocateAcrossServers, mdtCapacity, "mdt", mdtKey}) + lustreComponents = append(lustreComponents, lustreComponentType{dwsv1alpha3.AllocateAcrossServers, mdtCapacity, "mdt", mdtKey}) } else if len(nnfStorageProfile.Data.LustreStorage.StandaloneMGTPoolName) > 0 { if argsMap["command"] != "create_persistent" { - return dwsv1alpha2.NewResourceError("").WithUserMessage("standaloneMgtPoolName option can only be used with 'create_persistent' directive").WithFatal().WithUser() + return dwsv1alpha3.NewResourceError("").WithUserMessage("standaloneMgtPoolName option can only be used with 'create_persistent' directive").WithFatal().WithUser() } - lustreComponents = []lustreComponentType{{dwsv1alpha2.AllocateSingleServer, mgtCapacity, "mgt", mgtKey}} + lustreComponents = []lustreComponentType{{dwsv1alpha3.AllocateSingleServer, mgtCapacity, "mgt", mgtKey}} } else { - lustreComponents = append(lustreComponents, lustreComponentType{dwsv1alpha2.AllocateAcrossServers, mdtCapacity, "mdt", mdtKey}) - lustreComponents = append(lustreComponents, lustreComponentType{dwsv1alpha2.AllocateSingleServer, mgtCapacity, "mgt", mgtKey}) + lustreComponents = append(lustreComponents, lustreComponentType{dwsv1alpha3.AllocateAcrossServers, mdtCapacity, "mdt", mdtKey}) + lustreComponents = append(lustreComponents, lustreComponentType{dwsv1alpha3.AllocateSingleServer, mgtCapacity, "mgt", mgtKey}) } for _, i := range lustreComponents { targetMiscOptions := nnfStorageProfile.GetLustreMiscOptions(i.labelsStr) - component := dwsv1alpha2.StorageAllocationSet{} + component := dwsv1alpha3.StorageAllocationSet{} populateStorageAllocationSet(&component, i.strategy, i.cap, targetMiscOptions.Scale, targetMiscOptions.Count, targetMiscOptions.StorageLabels, i.labelsStr, 
i.colocationKey) allocationSets = append(allocationSets, component) } default: - return dwsv1alpha2.NewResourceError("invalid DW directive file system type: %s", filesystem).WithUserMessage("invalid DW directive").WithFatal() + return dwsv1alpha3.NewResourceError("invalid DW directive file system type: %s", filesystem).WithUserMessage("invalid DW directive").WithFatal() } if dbd.Status.Storage == nil { - dbd.Status.Storage = &dwsv1alpha2.StorageBreakdown{} + dbd.Status.Storage = &dwsv1alpha3.StorageBreakdown{} } dbd.Status.Storage.AllocationSets = allocationSets return nil } -func populateRequiredDaemons(dbd *dwsv1alpha2.DirectiveBreakdown, argsMap map[string]string) { +func populateRequiredDaemons(dbd *dwsv1alpha3.DirectiveBreakdown, argsMap map[string]string) { if wordList, present := argsMap["requires"]; present { dbd.Status.RequiredDaemons = strings.Split(wordList, ",") } @@ -602,38 +602,38 @@ func getCapacityInBytes(capacity string) (int64, error) { // matches[0] is the entire string, we want the parts. 
val, err := strconv.ParseFloat(matches[1], 64) if err != nil { - return 0, dwsv1alpha2.NewResourceError("invalid capacity string, %s", capacity) + return 0, dwsv1alpha3.NewResourceError("invalid capacity string, %s", capacity) } return int64(math.Round(val * powers[matches[3]])), nil } -func populateStorageAllocationSet(a *dwsv1alpha2.StorageAllocationSet, strategy dwsv1alpha2.AllocationStrategy, cap int64, scale int, count int, storageLabels []string, labelStr string, constraint *dwsv1alpha2.AllocationSetColocationConstraint) { +func populateStorageAllocationSet(a *dwsv1alpha3.StorageAllocationSet, strategy dwsv1alpha3.AllocationStrategy, cap int64, scale int, count int, storageLabels []string, labelStr string, constraint *dwsv1alpha3.AllocationSetColocationConstraint) { a.AllocationStrategy = strategy a.Label = labelStr a.MinimumCapacity = cap - a.Constraints.Labels = append(storageLabels, dwsv1alpha2.StorageTypeLabel+"=Rabbit") + a.Constraints.Labels = append(storageLabels, dwsv1alpha3.StorageTypeLabel+"=Rabbit") a.Constraints.Scale = scale a.Constraints.Count = count if constraint != nil { - a.Constraints.Colocation = []dwsv1alpha2.AllocationSetColocationConstraint{*constraint} + a.Constraints.Colocation = []dwsv1alpha3.AllocationSetColocationConstraint{*constraint} } } // SetupWithManager sets up the controller with the Manager. func (r *DirectiveBreakdownReconciler) SetupWithManager(mgr ctrl.Manager) error { - r.ChildObjects = []dwsv1alpha2.ObjectList{ - &dwsv1alpha2.ServersList{}, + r.ChildObjects = []dwsv1alpha3.ObjectList{ + &dwsv1alpha3.ServersList{}, &nnfv1alpha3.NnfStorageProfileList{}, - &dwsv1alpha2.PersistentStorageInstanceList{}, + &dwsv1alpha3.PersistentStorageInstanceList{}, } maxReconciles := runtime.GOMAXPROCS(0) return ctrl.NewControllerManagedBy(mgr). WithOptions(controller.Options{MaxConcurrentReconciles: maxReconciles}). - For(&dwsv1alpha2.DirectiveBreakdown{}). - Owns(&dwsv1alpha2.Servers{}). 
- Owns(&dwsv1alpha2.PersistentStorageInstance{}). + For(&dwsv1alpha3.DirectiveBreakdown{}). + Owns(&dwsv1alpha3.Servers{}). + Owns(&dwsv1alpha3.PersistentStorageInstance{}). Owns(&nnfv1alpha3.NnfStorageProfile{}). Complete(r) } diff --git a/internal/controller/directivebreakdown_controller_test.go b/internal/controller/directivebreakdown_controller_test.go index e489f6902..cca8e110b 100644 --- a/internal/controller/directivebreakdown_controller_test.go +++ b/internal/controller/directivebreakdown_controller_test.go @@ -29,7 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" ) @@ -53,12 +53,12 @@ var _ = Describe("DirectiveBreakdown test", func() { It("Creates a DirectiveBreakdown with a jobdw", func() { By("Creating a DirectiveBreakdown") - directiveBreakdown := &dwsv1alpha2.DirectiveBreakdown{ + directiveBreakdown := &dwsv1alpha3.DirectiveBreakdown{ ObjectMeta: metav1.ObjectMeta{ Name: "jobdw-test", Namespace: corev1.NamespaceDefault, }, - Spec: dwsv1alpha2.DirectiveBreakdownSpec{ + Spec: dwsv1alpha3.DirectiveBreakdownSpec{ Directive: "#DW jobdw name=jobdw-xfs type=xfs capacity=1GiB", }, } @@ -71,7 +71,7 @@ var _ = Describe("DirectiveBreakdown test", func() { }).Should(BeTrue()) Expect(directiveBreakdown.Status.RequiredDaemons).Should(BeEmpty()) - servers := &dwsv1alpha2.Servers{ + servers := &dwsv1alpha3.Servers{ ObjectMeta: metav1.ObjectMeta{ Name: directiveBreakdown.GetName(), Namespace: directiveBreakdown.GetNamespace(), @@ -104,12 +104,12 @@ var _ = Describe("DirectiveBreakdown test", func() { It("Creates a DirectiveBreakdown with a jobdw having required daemons", func() { By("Creating a DirectiveBreakdown") - directiveBreakdown := &dwsv1alpha2.DirectiveBreakdown{ + directiveBreakdown := 
&dwsv1alpha3.DirectiveBreakdown{ ObjectMeta: metav1.ObjectMeta{ Name: "jobdw-test", Namespace: corev1.NamespaceDefault, }, - Spec: dwsv1alpha2.DirectiveBreakdownSpec{ + Spec: dwsv1alpha3.DirectiveBreakdownSpec{ Directive: "#DW jobdw name=jobdw-xfs type=xfs requires=copy-offload capacity=1GiB", }, } @@ -131,12 +131,12 @@ var _ = Describe("DirectiveBreakdown test", func() { It("Verifies DirectiveBreakdowns with persistent storage", func() { By("Creating a DirectiveBreakdown with create_persistent") - directiveBreakdownOne := &dwsv1alpha2.DirectiveBreakdown{ + directiveBreakdownOne := &dwsv1alpha3.DirectiveBreakdown{ ObjectMeta: metav1.ObjectMeta{ Name: "create-persistent-test", Namespace: corev1.NamespaceDefault, }, - Spec: dwsv1alpha2.DirectiveBreakdownSpec{ + Spec: dwsv1alpha3.DirectiveBreakdownSpec{ Directive: "#DW create_persistent name=persistent-storage type=xfs capacity=1GiB", }, } @@ -148,7 +148,7 @@ var _ = Describe("DirectiveBreakdown test", func() { return directiveBreakdownOne.Status.Ready }).Should(BeTrue()) - persistentStorage := &dwsv1alpha2.PersistentStorageInstance{ + persistentStorage := &dwsv1alpha3.PersistentStorageInstance{ ObjectMeta: metav1.ObjectMeta{ Name: "persistent-storage", Namespace: directiveBreakdownOne.GetNamespace(), @@ -159,12 +159,12 @@ var _ = Describe("DirectiveBreakdown test", func() { }).Should(Succeed(), "Create the PersistentStorageInstance resource") By("Creating a DirectiveBreakdown with persistentdw") - directiveBreakdownTwo := &dwsv1alpha2.DirectiveBreakdown{ + directiveBreakdownTwo := &dwsv1alpha3.DirectiveBreakdown{ ObjectMeta: metav1.ObjectMeta{ Name: "use-persistent-test", Namespace: corev1.NamespaceDefault, }, - Spec: dwsv1alpha2.DirectiveBreakdownSpec{ + Spec: dwsv1alpha3.DirectiveBreakdownSpec{ Directive: "#DW persistentdw name=persistent-storage", }, } @@ -195,12 +195,12 @@ var _ = Describe("DirectiveBreakdown test", func() { }).Should(Succeed()) By("Creating a DirectiveBreakdown") - directiveBreakdown := 
&dwsv1alpha2.DirectiveBreakdown{ + directiveBreakdown := &dwsv1alpha3.DirectiveBreakdown{ ObjectMeta: metav1.ObjectMeta{ Name: "standalone-lustre-jobdw-test", Namespace: corev1.NamespaceDefault, }, - Spec: dwsv1alpha2.DirectiveBreakdownSpec{ + Spec: dwsv1alpha3.DirectiveBreakdownSpec{ Directive: "#DW jobdw name=jobdw-lustre type=lustre capacity=1GiB", }, } @@ -222,12 +222,12 @@ var _ = Describe("DirectiveBreakdown test", func() { }).Should(Succeed()) By("Creating a DirectiveBreakdown") - directiveBreakdown := &dwsv1alpha2.DirectiveBreakdown{ + directiveBreakdown := &dwsv1alpha3.DirectiveBreakdown{ ObjectMeta: metav1.ObjectMeta{ Name: "standalone-xfs-jobdw-test", Namespace: corev1.NamespaceDefault, }, - Spec: dwsv1alpha2.DirectiveBreakdownSpec{ + Spec: dwsv1alpha3.DirectiveBreakdownSpec{ Directive: "#DW jobdw name=jobdw-xfs type=xfs capacity=1GiB", }, } @@ -249,12 +249,12 @@ var _ = Describe("DirectiveBreakdown test", func() { }).Should(Succeed()) By("Creating a DirectiveBreakdown") - directiveBreakdown := &dwsv1alpha2.DirectiveBreakdown{ + directiveBreakdown := &dwsv1alpha3.DirectiveBreakdown{ ObjectMeta: metav1.ObjectMeta{ Name: "standalone-lustre-persistent-test", Namespace: corev1.NamespaceDefault, }, - Spec: dwsv1alpha2.DirectiveBreakdownSpec{ + Spec: dwsv1alpha3.DirectiveBreakdownSpec{ Directive: "#DW create_persistent name=persistent-lustre type=lustre", }, } diff --git a/internal/controller/dws_servers_controller.go b/internal/controller/dws_servers_controller.go index 5a6019310..c3819ff4b 100644 --- a/internal/controller/dws_servers_controller.go +++ b/internal/controller/dws_servers_controller.go @@ -40,7 +40,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" "github.com/DataWorkflowServices/dws/utils/updater" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" 
"github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" @@ -86,7 +86,7 @@ func (r *DWSServersReconciler) Reconcile(ctx context.Context, req ctrl.Request) log := r.Log.WithValues("Servers", req.NamespacedName) metrics.NnfServersReconcilesTotal.Inc() - servers := &dwsv1alpha2.Servers{} + servers := &dwsv1alpha3.Servers{} if err := r.Get(ctx, req.NamespacedName, servers); err != nil { // ignore not-found errors, since they can't be fixed by an immediate // requeue (we'll need to wait for a new notification), and we can get them @@ -94,7 +94,7 @@ func (r *DWSServersReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, client.IgnoreNotFound(err) } - statusUpdater := updater.NewStatusUpdater[*dwsv1alpha2.ServersStatus](servers) + statusUpdater := updater.NewStatusUpdater[*dwsv1alpha3.ServersStatus](servers) defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() defer func() { servers.Status.SetResourceErrorAndLog(err, log) }() @@ -161,7 +161,7 @@ func (r *DWSServersReconciler) Reconcile(ctx context.Context, req ctrl.Request) return r.updateCapacityUsed(ctx, servers) } -func (r *DWSServersReconciler) updateCapacityUsed(ctx context.Context, servers *dwsv1alpha2.Servers) (ctrl.Result, error) { +func (r *DWSServersReconciler) updateCapacityUsed(ctx context.Context, servers *dwsv1alpha3.Servers) (ctrl.Result, error) { originalServers := servers.DeepCopy() if len(servers.Status.AllocationSets) == 0 { @@ -217,11 +217,11 @@ func (r *DWSServersReconciler) updateCapacityUsed(ctx context.Context, servers * // If the nnfStorage was created using information from the Servers resource, then // we should always find a match. 
if serversIndex == -1 { - return ctrl.Result{}, dwsv1alpha2.NewResourceError("unable to find allocation label %s", label).WithFatal() + return ctrl.Result{}, dwsv1alpha3.NewResourceError("unable to find allocation label %s", label).WithFatal() } // Loop through the nnfNodeStorages corresponding to each of the Rabbit nodes and find - matchLabels := dwsv1alpha2.MatchingOwner(nnfStorage) + matchLabels := dwsv1alpha3.MatchingOwner(nnfStorage) matchLabels[nnfv1alpha3.AllocationSetLabel] = label listOptions := []client.ListOption{ @@ -258,7 +258,7 @@ func (r *DWSServersReconciler) updateCapacityUsed(ctx context.Context, servers * } for name, capacityAllocated := range capacityAllocatedMap { - servers.Status.AllocationSets[serversIndex].Storage[name] = dwsv1alpha2.ServersStatusStorage{AllocationSize: capacityAllocated} + servers.Status.AllocationSets[serversIndex].Storage[name] = dwsv1alpha3.ServersStatusStorage{AllocationSize: capacityAllocated} } for _, storageStatus := range servers.Status.AllocationSets[serversIndex].Storage { @@ -291,14 +291,14 @@ func (r *DWSServersReconciler) updateCapacityUsed(ctx context.Context, servers * } // Reset the allocation information from the status section to empty values -func (r *DWSServersReconciler) clearAllocationStatus(servers *dwsv1alpha2.Servers) { - servers.Status.AllocationSets = []dwsv1alpha2.ServersStatusAllocationSet{} +func (r *DWSServersReconciler) clearAllocationStatus(servers *dwsv1alpha3.Servers) { + servers.Status.AllocationSets = []dwsv1alpha3.ServersStatusAllocationSet{} for _, allocationSetSpec := range servers.Spec.AllocationSets { - allocationSetStatus := dwsv1alpha2.ServersStatusAllocationSet{} + allocationSetStatus := dwsv1alpha3.ServersStatusAllocationSet{} allocationSetStatus.Label = allocationSetSpec.Label - allocationSetStatus.Storage = make(map[string]dwsv1alpha2.ServersStatusStorage) + allocationSetStatus.Storage = make(map[string]dwsv1alpha3.ServersStatusStorage) for _, storage := range 
allocationSetSpec.Storage { - allocationSetStatus.Storage[storage.Name] = dwsv1alpha2.ServersStatusStorage{AllocationSize: 0} + allocationSetStatus.Storage[storage.Name] = dwsv1alpha3.ServersStatusStorage{AllocationSize: 0} } servers.Status.AllocationSets = append(servers.Status.AllocationSets, allocationSetStatus) @@ -306,7 +306,7 @@ func (r *DWSServersReconciler) clearAllocationStatus(servers *dwsv1alpha2.Server } // Either the NnfStorage has not been created yet, or it existed and has been deleted -func (r *DWSServersReconciler) statusSetEmpty(ctx context.Context, servers *dwsv1alpha2.Servers) (ctrl.Result, error) { +func (r *DWSServersReconciler) statusSetEmpty(ctx context.Context, servers *dwsv1alpha3.Servers) (ctrl.Result, error) { // Keep the original to check later for updates originalServers := servers.DeepCopy() @@ -324,7 +324,7 @@ func (r *DWSServersReconciler) statusSetEmpty(ctx context.Context, servers *dwsv } // Update Status if we've eclipsed the batch time -func (r *DWSServersReconciler) statusUpdate(ctx context.Context, servers *dwsv1alpha2.Servers, batch bool) (ctrl.Result, error) { +func (r *DWSServersReconciler) statusUpdate(ctx context.Context, servers *dwsv1alpha3.Servers, batch bool) (ctrl.Result, error) { log := r.Log.WithValues("Servers", types.NamespacedName{Name: servers.Name, Namespace: servers.Namespace}) if batch == true && servers.Status.LastUpdate != nil { batchTime, err := strconv.Atoi(os.Getenv("SERVERS_BATCH_TIME_MSEC")) @@ -359,7 +359,7 @@ func (r *DWSServersReconciler) statusUpdate(ctx context.Context, servers *dwsv1a // Wait for the NnfStorage resource to be deleted. We'll update the servers status to reflect // capacity being freed. 
-func (r *DWSServersReconciler) checkDeletedStorage(ctx context.Context, servers *dwsv1alpha2.Servers) (deletedStorage, error) { +func (r *DWSServersReconciler) checkDeletedStorage(ctx context.Context, servers *dwsv1alpha3.Servers) (deletedStorage, error) { log := r.Log.WithValues("Servers", types.NamespacedName{Name: servers.Name, Namespace: servers.Namespace}) // Get the NnfStorage with the same name/namespace as the servers resource @@ -393,7 +393,7 @@ func (r *DWSServersReconciler) SetupWithManager(mgr ctrl.Manager) error { maxReconciles := runtime.GOMAXPROCS(0) return ctrl.NewControllerManagedBy(mgr). WithOptions(controller.Options{MaxConcurrentReconciles: maxReconciles}). - For(&dwsv1alpha2.Servers{}). + For(&dwsv1alpha3.Servers{}). Watches(&nnfv1alpha3.NnfStorage{}, handler.EnqueueRequestsFromMapFunc(nnfStorageServersMapFunc)). Complete(r) } diff --git a/internal/controller/dws_storage_controller.go b/internal/controller/dws_storage_controller.go index bdfae142f..574d544bf 100644 --- a/internal/controller/dws_storage_controller.go +++ b/internal/controller/dws_storage_controller.go @@ -36,7 +36,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" "github.com/DataWorkflowServices/dws/utils/updater" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" ) @@ -68,7 +68,7 @@ type K8sNodeState struct { func (r *DWSStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, err error) { log := r.Log.WithValues("resource", req.NamespacedName.String()) - storage := &dwsv1alpha2.Storage{} + storage := &dwsv1alpha3.Storage{} if err := r.Get(ctx, req.NamespacedName, storage); err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } @@ -79,12 +79,12 @@ func (r *DWSStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, 
nil } - if storageType := labels[dwsv1alpha2.StorageTypeLabel]; storageType != "Rabbit" { + if storageType := labels[dwsv1alpha3.StorageTypeLabel]; storageType != "Rabbit" { return ctrl.Result{}, nil } // Create the status updater to update the status section if any changes are made - statusUpdater := updater.NewStatusUpdater[*dwsv1alpha2.StorageStatus](storage) + statusUpdater := updater.NewStatusUpdater[*dwsv1alpha3.StorageStatus](storage) defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() // Check if the object is being deleted @@ -92,8 +92,8 @@ func (r *DWSStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, nil } - if storage.Spec.State == dwsv1alpha2.DisabledState { - storage.Status.Status = dwsv1alpha2.DisabledStatus + if storage.Spec.State == dwsv1alpha3.DisabledState { + storage.Status.Status = dwsv1alpha3.DisabledStatus storage.Status.Message = "Storage node manually disabled" } @@ -114,23 +114,23 @@ func (r *DWSStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, client.IgnoreNotFound(err) } - storage.Status.Type = dwsv1alpha2.NVMe + storage.Status.Type = dwsv1alpha3.NVMe storage.Status.Capacity = nnfNode.Status.Capacity - storage.Status.Access.Protocol = dwsv1alpha2.PCIe + storage.Status.Access.Protocol = dwsv1alpha3.PCIe if len(nnfNode.Status.Servers) == 0 { return ctrl.Result{}, nil // Wait until severs array has been filled in with Rabbit info } // Populate server status' - Server 0 is reserved as the Rabbit node. 
- storage.Status.Access.Servers = []dwsv1alpha2.Node{{ + storage.Status.Access.Servers = []dwsv1alpha3.Node{{ Name: storage.Name, Status: nnfNode.Status.Servers[0].Status.ConvertToDWSResourceStatus(), }} // Populate compute status' if len(nnfNode.Status.Servers) > 1 { - storage.Status.Access.Computes = make([]dwsv1alpha2.Node, 0) + storage.Status.Access.Computes = make([]dwsv1alpha3.Node, 0) for _, server := range nnfNode.Status.Servers[1:] /*Skip Rabbit*/ { // Servers that are unassigned in the system configuration will @@ -140,7 +140,7 @@ func (r *DWSStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) } storage.Status.Access.Computes = append(storage.Status.Access.Computes, - dwsv1alpha2.Node{ + dwsv1alpha3.Node{ Name: server.Hostname, Status: server.Status.ConvertToDWSResourceStatus(), }) @@ -148,7 +148,7 @@ func (r *DWSStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) } // Populate storage status' - storage.Status.Devices = make([]dwsv1alpha2.StorageDevice, len(nnfNode.Status.Drives)) + storage.Status.Devices = make([]dwsv1alpha3.StorageDevice, len(nnfNode.Status.Drives)) for idx, drive := range nnfNode.Status.Drives { device := &storage.Status.Devices[idx] @@ -172,12 +172,12 @@ func (r *DWSStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) } // If the Rabbit is disabled we don't have to check the fenced status - if storage.Spec.State == dwsv1alpha2.DisabledState { + if storage.Spec.State == dwsv1alpha3.DisabledState { return ctrl.Result{}, nil } // Clear the fence status if the storage resource is enabled from a disabled state - if storage.Status.Status == dwsv1alpha2.DisabledStatus { + if storage.Status.Status == dwsv1alpha3.DisabledStatus { if nnfNode.Status.Fenced { log.WithValues("fenced", nnfNode.Status.Fenced).Info("resource disabled") @@ -199,7 +199,7 @@ func (r *DWSStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) } if nnfNode.Status.Fenced { - storage.Status.Status = 
dwsv1alpha2.DegradedStatus + storage.Status.Status = dwsv1alpha3.DegradedStatus storage.Status.RebootRequired = true storage.Status.Message = "Storage node requires reboot to recover from STONITH event" } else { @@ -210,11 +210,11 @@ func (r *DWSStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) if !nodeState.nodeReady { log.Info("storage node is offline") - storage.Status.Status = dwsv1alpha2.OfflineStatus + storage.Status.Status = dwsv1alpha3.OfflineStatus storage.Status.Message = "Kubernetes node is offline" } else if len(nodeState.nnfTaint) > 0 { log.Info(fmt.Sprintf("storage node is tainted with %s", nodeState.nnfTaint)) - storage.Status.Status = dwsv1alpha2.DrainedStatus + storage.Status.Status = dwsv1alpha3.DrainedStatus storage.Status.Message = fmt.Sprintf("Kubernetes node is tainted with %s", nodeState.nnfTaint) } else { storage.Status.Status = nnfNode.Status.Status.ConvertToDWSResourceStatus() @@ -224,7 +224,7 @@ func (r *DWSStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, nil } -func (r *DWSStorageReconciler) coreNodeState(ctx context.Context, storage *dwsv1alpha2.Storage) (K8sNodeState, error) { +func (r *DWSStorageReconciler) coreNodeState(ctx context.Context, storage *dwsv1alpha3.Storage) (K8sNodeState, error) { nodeState := K8sNodeState{} // Get the kubernetes node resource corresponding to the same node as the nnfNode resource. @@ -272,7 +272,7 @@ func (r *DWSStorageReconciler) SetupWithManager(mgr ctrl.Manager) error { } return ctrl.NewControllerManagedBy(mgr). - For(&dwsv1alpha2.Storage{}). + For(&dwsv1alpha3.Storage{}). Watches(&nnfv1alpha3.NnfNode{}, handler.EnqueueRequestsFromMapFunc(nnfNodeMapFunc)). Watches(&corev1.Node{}, handler.EnqueueRequestsFromMapFunc(nodeMapFunc)). 
Complete(r) diff --git a/internal/controller/filesystem_helpers.go b/internal/controller/filesystem_helpers.go index aafc352e8..19a9017dc 100644 --- a/internal/controller/filesystem_helpers.go +++ b/internal/controller/filesystem_helpers.go @@ -34,7 +34,7 @@ import ( "github.com/NearNodeFlash/nnf-sos/pkg/filesystem" "github.com/go-logr/logr" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" ) @@ -46,12 +46,12 @@ func getBlockDeviceAndFileSystemForKind(ctx context.Context, c client.Client, nn blockDevice, err := newMockBlockDevice(ctx, c, nnfNodeStorage, index, log) if err != nil { - return nil, nil, dwsv1alpha2.NewResourceError("could not create mock block device").WithError(err).WithMajor() + return nil, nil, dwsv1alpha3.NewResourceError("could not create mock block device").WithError(err).WithMajor() } fileSystem, err := newMockFileSystem(ctx, c, nnfNodeStorage, blockDevice, index, log) if err != nil { - return nil, nil, dwsv1alpha2.NewResourceError("could not create mock file system").WithError(err).WithMajor() + return nil, nil, dwsv1alpha3.NewResourceError("could not create mock file system").WithError(err).WithMajor() } return blockDevice, fileSystem, nil @@ -66,43 +66,43 @@ func getBlockDeviceAndFileSystem(ctx context.Context, c client.Client, nnfNodeSt nnfStorageProfile, err := getPinnedStorageProfileFromLabel(ctx, c, nnfNodeStorage) if err != nil { - return nil, nil, dwsv1alpha2.NewResourceError("could not find pinned storage profile").WithError(err).WithFatal() + return nil, nil, dwsv1alpha3.NewResourceError("could not find pinned storage profile").WithError(err).WithFatal() } switch nnfNodeStorage.Spec.FileSystemType { case "raw": blockDevice, err := newLvmBlockDevice(ctx, c, nnfNodeStorage, nnfStorageProfile.Data.RawStorage.CmdLines, index, log) if err != nil { - return nil, nil, dwsv1alpha2.NewResourceError("could 
not create LVM block device").WithError(err).WithMajor() + return nil, nil, dwsv1alpha3.NewResourceError("could not create LVM block device").WithError(err).WithMajor() } fileSystem, err := newBindFileSystem(ctx, c, nnfNodeStorage, nnfStorageProfile.Data.RawStorage.CmdLines, blockDevice, index, log) if err != nil { - return nil, nil, dwsv1alpha2.NewResourceError("could not create XFS file system").WithError(err).WithMajor() + return nil, nil, dwsv1alpha3.NewResourceError("could not create XFS file system").WithError(err).WithMajor() } return blockDevice, fileSystem, nil case "xfs": blockDevice, err := newLvmBlockDevice(ctx, c, nnfNodeStorage, nnfStorageProfile.Data.XFSStorage.CmdLines, index, log) if err != nil { - return nil, nil, dwsv1alpha2.NewResourceError("could not create LVM block device").WithError(err).WithMajor() + return nil, nil, dwsv1alpha3.NewResourceError("could not create LVM block device").WithError(err).WithMajor() } fileSystem, err := newXfsFileSystem(ctx, c, nnfNodeStorage, nnfStorageProfile.Data.XFSStorage.CmdLines, blockDevice, index, log) if err != nil { - return nil, nil, dwsv1alpha2.NewResourceError("could not create XFS file system").WithError(err).WithMajor() + return nil, nil, dwsv1alpha3.NewResourceError("could not create XFS file system").WithError(err).WithMajor() } return blockDevice, fileSystem, nil case "gfs2": blockDevice, err := newLvmBlockDevice(ctx, c, nnfNodeStorage, nnfStorageProfile.Data.GFS2Storage.CmdLines, index, log) if err != nil { - return nil, nil, dwsv1alpha2.NewResourceError("could not create LVM block device").WithError(err).WithMajor() + return nil, nil, dwsv1alpha3.NewResourceError("could not create LVM block device").WithError(err).WithMajor() } fileSystem, err := newGfs2FileSystem(ctx, c, nnfNodeStorage, nnfStorageProfile.Data.GFS2Storage.CmdLines, blockDevice, index, log) if err != nil { - return nil, nil, dwsv1alpha2.NewResourceError("could not create GFS2 file system").WithError(err).WithMajor() + return 
nil, nil, dwsv1alpha3.NewResourceError("could not create GFS2 file system").WithError(err).WithMajor() } return blockDevice, fileSystem, nil @@ -123,12 +123,12 @@ func getBlockDeviceAndFileSystem(ctx context.Context, c client.Client, nnfNodeSt commandLines = nnfStorageProfile.Data.LustreStorage.OstCmdLines break default: - return nil, nil, dwsv1alpha2.NewResourceError("invalid Lustre target type %s", nnfNodeStorage.Spec.LustreStorage.TargetType).WithFatal() + return nil, nil, dwsv1alpha3.NewResourceError("invalid Lustre target type %s", nnfNodeStorage.Spec.LustreStorage.TargetType).WithFatal() } blockDevice, err := newZpoolBlockDevice(ctx, c, nnfNodeStorage, commandLines, index, log) if err != nil { - return nil, nil, dwsv1alpha2.NewResourceError("could not create zpool block device").WithError(err).WithMajor() + return nil, nil, dwsv1alpha3.NewResourceError("could not create zpool block device").WithError(err).WithMajor() } mountCommand := "" @@ -140,7 +140,7 @@ func getBlockDeviceAndFileSystem(ctx context.Context, c client.Client, nnfNodeSt fileSystem, err := newLustreFileSystem(ctx, c, nnfNodeStorage, commandLines, mountCommand, blockDevice, index, log) if err != nil { - return nil, nil, dwsv1alpha2.NewResourceError("could not create lustre file system").WithError(err).WithMajor() + return nil, nil, dwsv1alpha3.NewResourceError("could not create lustre file system").WithError(err).WithMajor() } return blockDevice, fileSystem, nil @@ -148,7 +148,7 @@ func getBlockDeviceAndFileSystem(ctx context.Context, c client.Client, nnfNodeSt break } - return nil, nil, dwsv1alpha2.NewResourceError("unsupported file system type %s", nnfNodeStorage.Spec.FileSystemType).WithMajor() + return nil, nil, dwsv1alpha3.NewResourceError("unsupported file system type %s", nnfNodeStorage.Spec.FileSystemType).WithMajor() } func isNodeBlockStorageCurrent(ctx context.Context, c client.Client, nnfNodeBlockStorage *nnfv1alpha3.NnfNodeBlockStorage) (bool, error) { @@ -164,7 +164,7 @@ func 
isNodeBlockStorageCurrent(ctx context.Context, c client.Client, nnfNodeBloc } if err := c.Get(ctx, client.ObjectKeyFromObject(pod), pod); err != nil { - return false, dwsv1alpha2.NewResourceError("could not get pod: %v", client.ObjectKeyFromObject(pod)).WithError(err) + return false, dwsv1alpha3.NewResourceError("could not get pod: %v", client.ObjectKeyFromObject(pod)).WithError(err) } // The controllers for the NnfNodeStorage and NnfNodeBlockStorage both run in the same pod. Make sure that the NnfNodeBlockStorage @@ -204,11 +204,11 @@ func newZpoolBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *n } if err := c.Get(ctx, client.ObjectKeyFromObject(nnfNodeBlockStorage), nnfNodeBlockStorage); err != nil { - return nil, dwsv1alpha2.NewResourceError("could not get NnfNodeBlockStorage: %v", client.ObjectKeyFromObject(nnfNodeBlockStorage)).WithError(err).WithUserMessage("could not find storage allocation").WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not get NnfNodeBlockStorage: %v", client.ObjectKeyFromObject(nnfNodeBlockStorage)).WithError(err).WithUserMessage("could not find storage allocation").WithMajor() } if nnfNodeBlockStorage.Status.Ready == false { - return nil, dwsv1alpha2.NewResourceError("NnfNodeBlockStorage: %v not ready", client.ObjectKeyFromObject(nnfNodeBlockStorage)) + return nil, dwsv1alpha3.NewResourceError("NnfNodeBlockStorage: %v not ready", client.ObjectKeyFromObject(nnfNodeBlockStorage)) } // If the NnfNodeBlockStorage hasn't been updated by this pod yet, then wait for that to happen. 
The /dev paths may change if the node was @@ -219,12 +219,12 @@ func newZpoolBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *n } if !current { - return nil, dwsv1alpha2.NewResourceError("NnfNodeBlockStorage: %v has stale status", client.ObjectKeyFromObject(nnfNodeBlockStorage)).WithError(err) + return nil, dwsv1alpha3.NewResourceError("NnfNodeBlockStorage: %v has stale status", client.ObjectKeyFromObject(nnfNodeBlockStorage)).WithError(err) } zpoolName, err := zpoolName(ctx, c, nnfNodeStorage, nnfNodeStorage.Spec.LustreStorage.TargetType, nnfNodeStorage.Spec.LustreStorage.StartIndex+index) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not create zpool name").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not create zpool name").WithError(err).WithMajor() } zpool.Log = log @@ -256,11 +256,11 @@ func newLvmBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *nnf err := c.Get(ctx, client.ObjectKeyFromObject(nnfNodeBlockStorage), nnfNodeBlockStorage) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not get NnfNodeBlockStorage: %v", client.ObjectKeyFromObject(nnfNodeBlockStorage)).WithError(err).WithUserMessage("could not find storage allocation").WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not get NnfNodeBlockStorage: %v", client.ObjectKeyFromObject(nnfNodeBlockStorage)).WithError(err).WithUserMessage("could not find storage allocation").WithMajor() } if nnfNodeBlockStorage.Status.Ready == false { - return nil, dwsv1alpha2.NewResourceError("NnfNodeBlockStorage: %v not ready", client.ObjectKeyFromObject(nnfNodeBlockStorage)) + return nil, dwsv1alpha3.NewResourceError("NnfNodeBlockStorage: %v not ready", client.ObjectKeyFromObject(nnfNodeBlockStorage)) } // If the NnfNodeBlockStorage hasn't been updated by this pod yet, then wait for that to happen. 
The /dev paths may change if the node was @@ -271,7 +271,7 @@ func newLvmBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *nnf } if !current { - return nil, dwsv1alpha2.NewResourceError("NnfNodeBlockStorage: %v has stale status", client.ObjectKeyFromObject(nnfNodeBlockStorage)).WithError(err) + return nil, dwsv1alpha3.NewResourceError("NnfNodeBlockStorage: %v has stale status", client.ObjectKeyFromObject(nnfNodeBlockStorage)).WithError(err) } if len(nnfNodeBlockStorage.Status.Allocations) > 0 && len(nnfNodeBlockStorage.Status.Allocations[blockIndex].Accesses) > 0 { @@ -297,12 +297,12 @@ func newLvmBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *nnf vgName, err := volumeGroupName(ctx, c, nnfNodeStorage, blockIndex) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not get volume group name").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not get volume group name").WithError(err).WithMajor() } lvName, err := logicalVolumeName(ctx, c, nnfNodeStorage, index) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not get logical volume name").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not get logical volume name").WithError(err).WithMajor() } percentVG := 100 @@ -416,7 +416,7 @@ func newLustreFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *n targetPath, err := lustreTargetPath(ctx, c, nnfNodeStorage, nnfNodeStorage.Spec.LustreStorage.TargetType, nnfNodeStorage.Spec.LustreStorage.StartIndex+index) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not get lustre target mount path").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not get lustre target mount path").WithError(err).WithMajor() } fs.Log = log @@ -460,7 +460,7 @@ func lustreTargetPath(ctx context.Context, c client.Client, nnfNodeStorage *nnfv // Use the NnfStorage UID since the NnfStorage exists for as long as the storage 
allocation exists. // This is important for persistent instances - nnfStorageUid, ok := labels[dwsv1alpha2.OwnerUidLabel] + nnfStorageUid, ok := labels[dwsv1alpha3.OwnerUidLabel] if !ok { return "", fmt.Errorf("missing Owner UID label on NnfNodeStorage") } @@ -473,7 +473,7 @@ func zpoolName(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha3 // Use the NnfStorage UID since the NnfStorage exists for as long as the storage allocation exists. // This is important for persistent instances - nnfStorageUid, ok := labels[dwsv1alpha2.OwnerUidLabel] + nnfStorageUid, ok := labels[dwsv1alpha3.OwnerUidLabel] if !ok { return "", fmt.Errorf("missing Owner UID label on NnfNodeStorage") } @@ -486,7 +486,7 @@ func volumeGroupName(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1 // Use the NnfStorage UID since the NnfStorage exists for as long as the storage allocation exists. // This is important for persistent instances - nnfStorageUid, ok := labels[dwsv1alpha2.OwnerUidLabel] + nnfStorageUid, ok := labels[dwsv1alpha3.OwnerUidLabel] if !ok { return "", fmt.Errorf("missing Owner UID label on NnfNodeStorage") } diff --git a/internal/controller/integration_test.go b/internal/controller/integration_test.go index 2d1ee4d14..8951378a2 100644 --- a/internal/controller/integration_test.go +++ b/internal/controller/integration_test.go @@ -39,7 +39,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" dwparse "github.com/DataWorkflowServices/dws/utils/dwdparse" lusv1beta1 "github.com/NearNodeFlash/lustre-fs-operator/api/v1beta1" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" @@ -58,8 +58,8 @@ var _ = Describe("Integration Test", func() { blockOwnerDeletion := true var ( - workflow *dwsv1alpha2.Workflow - persistentInstance *dwsv1alpha2.PersistentStorageInstance + workflow 
*dwsv1alpha3.Workflow + persistentInstance *dwsv1alpha3.PersistentStorageInstance nodeNames []string setup sync.Once storageProfile *nnfv1alpha3.NnfStorageProfile @@ -67,7 +67,7 @@ var _ = Describe("Integration Test", func() { dmm *nnfv1alpha3.NnfDataMovementManager ) - advanceState := func(state dwsv1alpha2.WorkflowState, w *dwsv1alpha2.Workflow, testStackOffset int) { + advanceState := func(state dwsv1alpha3.WorkflowState, w *dwsv1alpha3.Workflow, testStackOffset int) { By(fmt.Sprintf("Advancing to %s state, wf %s", state, w.Name)) Eventually(func() error { Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(w), w)).WithOffset(testStackOffset).To(Succeed()) @@ -76,7 +76,7 @@ var _ = Describe("Integration Test", func() { }).WithOffset(testStackOffset).Should(Succeed(), fmt.Sprintf("Advancing to %s state", state)) By(fmt.Sprintf("Waiting on state %s", state)) - Eventually(func() dwsv1alpha2.WorkflowState { + Eventually(func() dwsv1alpha3.WorkflowState { Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(w), w)).WithOffset(testStackOffset).To(Succeed()) return w.Status.State }).WithOffset(testStackOffset).Should(Equal(state), fmt.Sprintf("Waiting on state %s", state)) @@ -103,7 +103,7 @@ var _ = Describe("Integration Test", func() { } } - advanceStateAndCheckReady := func(state dwsv1alpha2.WorkflowState, w *dwsv1alpha2.Workflow) { + advanceStateAndCheckReady := func(state dwsv1alpha3.WorkflowState, w *dwsv1alpha3.Workflow) { By(fmt.Sprintf("advanceStateAndCheckReady: advance workflow state %s", state)) // If this method fails, have the test results report where it was called from rather @@ -121,14 +121,14 @@ var _ = Describe("Integration Test", func() { // If we're currently in a staging state, ensure the data movement status is marked as finished so // we can successfully transition out of that state. 
- if w.Status.State == dwsv1alpha2.StateDataIn || w.Status.State == dwsv1alpha2.StateDataOut { + if w.Status.State == dwsv1alpha3.StateDataIn || w.Status.State == dwsv1alpha3.StateDataOut { findDataMovementDirectiveIndex := func() int { for idx, directive := range w.Spec.DWDirectives { - if state == dwsv1alpha2.StateDataIn && strings.HasPrefix(directive, "#DW copy_in") { + if state == dwsv1alpha3.StateDataIn && strings.HasPrefix(directive, "#DW copy_in") { return idx } - if state == dwsv1alpha2.StateDataOut && strings.HasPrefix(directive, "#DW copy_out") { + if state == dwsv1alpha3.StateDataOut && strings.HasPrefix(directive, "#DW copy_out") { return idx } } @@ -155,7 +155,7 @@ var _ = Describe("Integration Test", func() { return w.Status.Ready }).WithOffset(testStackOffset).Should(BeTrue(), fmt.Sprintf("Waiting on ready status state %s", state)) - if w.Status.State == dwsv1alpha2.StateSetup { + if w.Status.State == dwsv1alpha3.StateSetup { for dwIndex, directive := range w.Spec.DWDirectives { dwArgs, err := dwparse.BuildArgsMap(directive) Expect(err).WithOffset(testStackOffset).To(Succeed()) @@ -176,10 +176,10 @@ var _ = Describe("Integration Test", func() { verifyNnfNodeStoragesHaveStorageProfileLabel(nnfStorage) } } - } // advanceStateAndCheckReady(state dwsv1alpha2.WorkflowState, w *dwsv1alpha2.Workflow) + } // advanceStateAndCheckReady(state dwsv1alpha3.WorkflowState, w *dwsv1alpha3.Workflow) - checkPSIConsumerReference := func(storageName string, w *dwsv1alpha2.Workflow) { - persistentInstance = &dwsv1alpha2.PersistentStorageInstance{ + checkPSIConsumerReference := func(storageName string, w *dwsv1alpha3.Workflow) { + persistentInstance = &dwsv1alpha3.PersistentStorageInstance{ ObjectMeta: metav1.ObjectMeta{ Name: storageName, Namespace: w.Namespace, @@ -195,17 +195,17 @@ var _ = Describe("Integration Test", func() { Expect(persistentInstance.Spec.ConsumerReferences[0].Namespace).To(Equal(w.Namespace)) } - checkPSIToServerMapping := func(psiOwnedByWorkflow 
bool, storageName string, w *dwsv1alpha2.Workflow) { + checkPSIToServerMapping := func(psiOwnedByWorkflow bool, storageName string, w *dwsv1alpha3.Workflow) { workFlowOwnerRef := metav1.OwnerReference{ - Kind: reflect.TypeOf(dwsv1alpha2.Workflow{}).Name(), - APIVersion: dwsv1alpha2.GroupVersion.String(), + Kind: reflect.TypeOf(dwsv1alpha3.Workflow{}).Name(), + APIVersion: dwsv1alpha3.GroupVersion.String(), UID: w.GetUID(), Name: w.GetName(), Controller: &controller, BlockOwnerDeletion: &blockOwnerDeletion, } - persistentInstance = &dwsv1alpha2.PersistentStorageInstance{ + persistentInstance = &dwsv1alpha3.PersistentStorageInstance{ ObjectMeta: metav1.ObjectMeta{ Name: storageName, Namespace: w.Namespace, @@ -225,8 +225,8 @@ var _ = Describe("Integration Test", func() { // running in the testenv so we can't prove it is deleted on teardown. // See https://book.kubebuilder.io/reference/envtest.html#testing-considerations psiOwnerRef := metav1.OwnerReference{ - Kind: reflect.TypeOf(dwsv1alpha2.PersistentStorageInstance{}).Name(), - APIVersion: dwsv1alpha2.GroupVersion.String(), + Kind: reflect.TypeOf(dwsv1alpha3.PersistentStorageInstance{}).Name(), + APIVersion: dwsv1alpha3.GroupVersion.String(), UID: persistentInstance.GetUID(), Name: persistentInstance.GetName(), Controller: &controller, @@ -234,23 +234,23 @@ var _ = Describe("Integration Test", func() { } By("Checking DW Directive has Servers resource, named from the PSI") - servers := &dwsv1alpha2.Servers{} + servers := &dwsv1alpha3.Servers{} Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(persistentInstance), servers)).To(Succeed()) Expect(servers.ObjectMeta.OwnerReferences).To(ContainElement(psiOwnerRef), "Servers owned by PSI") By("Checking PersistentStorageInstance has reference to its Servers resource now that DirectiveBreakdown controller has finished") - Expect(persistentInstance.Status.Servers.Kind).To(Equal(reflect.TypeOf(dwsv1alpha2.Servers{}).Name())) + 
Expect(persistentInstance.Status.Servers.Kind).To(Equal(reflect.TypeOf(dwsv1alpha3.Servers{}).Name())) Expect(persistentInstance.Status.Servers.Name).To(Equal(persistentInstance.Name)) Expect(persistentInstance.Status.Servers.Namespace).To(Equal(persistentInstance.Namespace)) } checkServersToNnfStorageMapping := func(nnfStoragePresent bool) { - servers := &dwsv1alpha2.Servers{} + servers := &dwsv1alpha3.Servers{} Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(persistentInstance), servers)).To(Succeed(), "Fetch Servers") persistentStorageOwnerRef := metav1.OwnerReference{ - Kind: reflect.TypeOf(dwsv1alpha2.PersistentStorageInstance{}).Name(), - APIVersion: dwsv1alpha2.GroupVersion.String(), + Kind: reflect.TypeOf(dwsv1alpha3.PersistentStorageInstance{}).Name(), + APIVersion: dwsv1alpha3.GroupVersion.String(), UID: persistentInstance.GetUID(), Name: persistentInstance.GetName(), Controller: &controller, @@ -306,17 +306,17 @@ var _ = Describe("Integration Test", func() { } generator := computeNameGeneratorFunc() - configSpec := dwsv1alpha2.SystemConfigurationSpec{} + configSpec := dwsv1alpha3.SystemConfigurationSpec{} for _, nodeName := range nodeNames { - storageNode := dwsv1alpha2.SystemConfigurationStorageNode{ + storageNode := dwsv1alpha3.SystemConfigurationStorageNode{ Type: "Rabbit", Name: nodeName, } computeNames := generator() - storageNode.ComputesAccess = make([]dwsv1alpha2.SystemConfigurationComputeNodeReference, 0) + storageNode.ComputesAccess = make([]dwsv1alpha3.SystemConfigurationComputeNodeReference, 0) for idx, name := range computeNames { - computesAccess := dwsv1alpha2.SystemConfigurationComputeNodeReference{ + computesAccess := dwsv1alpha3.SystemConfigurationComputeNodeReference{ Name: name, Index: idx, } @@ -325,7 +325,7 @@ var _ = Describe("Integration Test", func() { configSpec.StorageNodes = append(configSpec.StorageNodes, storageNode) } - config := &dwsv1alpha2.SystemConfiguration{ + config := &dwsv1alpha3.SystemConfiguration{ 
ObjectMeta: metav1.ObjectMeta{ Name: "default", Namespace: corev1.NamespaceDefault, @@ -376,7 +376,7 @@ var _ = Describe("Integration Test", func() { Expect(k8sClient.Create(context.TODO(), nnfNode)).To(Succeed()) // Check that the DWS storage resource was updated with the compute node information - storage := &dwsv1alpha2.Storage{ + storage := &dwsv1alpha3.Storage{ ObjectMeta: metav1.ObjectMeta{ Name: nodeName, Namespace: corev1.NamespaceDefault, @@ -460,7 +460,7 @@ var _ = Describe("Integration Test", func() { } - config := &dwsv1alpha2.SystemConfiguration{ + config := &dwsv1alpha3.SystemConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: "default", Namespace: corev1.NamespaceDefault, @@ -468,7 +468,7 @@ var _ = Describe("Integration Test", func() { } Expect(k8sClient.Delete(context.TODO(), config)).To(Succeed()) - tempConfig := &dwsv1alpha2.SystemConfiguration{} + tempConfig := &dwsv1alpha3.SystemConfiguration{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(config), tempConfig) }).ShouldNot(Succeed()) @@ -533,13 +533,13 @@ var _ = Describe("Integration Test", func() { wfid := uuid.NewString()[0:8] By(fmt.Sprintf("Testing directive '%s' filesystem '%s'", storageDirective, fsType)) - workflow = &dwsv1alpha2.Workflow{ + workflow = &dwsv1alpha3.Workflow{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%s-%s", storageDirectiveName, fsType, wfid), Namespace: corev1.NamespaceDefault, }, - Spec: dwsv1alpha2.WorkflowSpec{ - DesiredState: dwsv1alpha2.StateProposal, + Spec: dwsv1alpha3.WorkflowSpec{ + DesiredState: dwsv1alpha3.StateProposal, JobID: intstr.FromInt(idx), WLMID: "Test WLMID", DWDirectives: []string{ @@ -558,8 +558,8 @@ var _ = Describe("Integration Test", func() { // Store ownership reference to workflow - this is checked for many of the created objects ownerRef := metav1.OwnerReference{ - Kind: 
reflect.TypeOf(dwsv1alpha2.Workflow{}).Name(), - APIVersion: dwsv1alpha2.GroupVersion.String(), + Kind: reflect.TypeOf(dwsv1alpha3.Workflow{}).Name(), + APIVersion: dwsv1alpha3.GroupVersion.String(), UID: workflow.GetUID(), Name: workflow.GetName(), Controller: &controller, @@ -571,25 +571,25 @@ var _ = Describe("Integration Test", func() { By("Checking proposal state and ready") Eventually(func() bool { Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(workflow), workflow)).To(Succeed()) - return workflow.Status.State == dwsv1alpha2.StateProposal && workflow.Status.Ready + return workflow.Status.State == dwsv1alpha3.StateProposal && workflow.Status.Ready }).Should(BeTrue()) By("Checking for Computes resource") - computes := &dwsv1alpha2.Computes{ + computes := &dwsv1alpha3.Computes{ ObjectMeta: metav1.ObjectMeta{ Name: workflow.Status.Computes.Name, Namespace: workflow.Status.Computes.Namespace, }, } - Expect(workflow.Status.Computes.Kind).To(Equal(reflect.TypeOf(dwsv1alpha2.Computes{}).Name())) + Expect(workflow.Status.Computes.Kind).To(Equal(reflect.TypeOf(dwsv1alpha3.Computes{}).Name())) Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(computes), computes)).To(Succeed()) Expect(computes.ObjectMeta.OwnerReferences).To(ContainElement(ownerRef)) By("Checking various DW Directive Breakdowns") Expect(workflow.Status.DirectiveBreakdowns).To(HaveLen(wfTests[idx].expectedDirectiveBreakdowns)) for _, dbdRef := range workflow.Status.DirectiveBreakdowns { - dbd := &dwsv1alpha2.DirectiveBreakdown{} - Expect(dbdRef.Kind).To(Equal(reflect.TypeOf(dwsv1alpha2.DirectiveBreakdown{}).Name())) + dbd := &dwsv1alpha3.DirectiveBreakdown{} + Expect(dbdRef.Kind).To(Equal(reflect.TypeOf(dwsv1alpha3.DirectiveBreakdown{}).Name())) By("DW Directive Breakdown should go ready") Eventually(func() bool { @@ -618,8 +618,8 @@ var _ = Describe("Integration Test", func() { Expect(dbd.Status.Compute.Constraints.Location).ToNot(BeEmpty()) for _, location := range 
dbd.Status.Compute.Constraints.Location { - servers := &dwsv1alpha2.Servers{} - Expect(location.Reference.Kind).To(Equal(reflect.TypeOf(dwsv1alpha2.Servers{}).Name())) + servers := &dwsv1alpha3.Servers{} + Expect(location.Reference.Kind).To(Equal(reflect.TypeOf(dwsv1alpha3.Servers{}).Name())) Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: location.Reference.Name, Namespace: location.Reference.Namespace}, servers)).To(Succeed()) } } else { @@ -635,16 +635,16 @@ var _ = Describe("Integration Test", func() { Expect(dbd.Status.Storage.AllocationSets).To(HaveLen(expectedAllocationSets)) By("DW Directive has Servers resource accessible from the DirectiveBreakdown") - servers := &dwsv1alpha2.Servers{} - Expect(dbd.Status.Storage.Reference.Kind).To(Equal(reflect.TypeOf(dwsv1alpha2.Servers{}).Name())) + servers := &dwsv1alpha3.Servers{} + Expect(dbd.Status.Storage.Reference.Kind).To(Equal(reflect.TypeOf(dwsv1alpha3.Servers{}).Name())) Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: dbd.Status.Storage.Reference.Name, Namespace: dbd.Status.Storage.Reference.Namespace}, servers)).To(Succeed()) By("DW Directive verifying servers resource") switch storageDirective { case "jobdw": Expect(servers.ObjectMeta.OwnerReferences).To(ContainElement(metav1.OwnerReference{ - Kind: reflect.TypeOf(dwsv1alpha2.DirectiveBreakdown{}).Name(), - APIVersion: dwsv1alpha2.GroupVersion.String(), + Kind: reflect.TypeOf(dwsv1alpha3.DirectiveBreakdown{}).Name(), + APIVersion: dwsv1alpha3.GroupVersion.String(), UID: dbd.GetUID(), Name: dbd.GetName(), Controller: &controller, @@ -658,9 +658,9 @@ var _ = Describe("Integration Test", func() { By("Assigning storage") if fsType != "lustre" { // If non-lustre, allocate storage on all the Rabbit nodes in test. 
- storage := make([]dwsv1alpha2.ServersSpecStorage, 0, len(nodeNames)) + storage := make([]dwsv1alpha3.ServersSpecStorage, 0, len(nodeNames)) for _, nodeName := range nodeNames { - storage = append(storage, dwsv1alpha2.ServersSpecStorage{ + storage = append(storage, dwsv1alpha3.ServersSpecStorage{ AllocationCount: 1, Name: nodeName, }) @@ -669,8 +669,8 @@ var _ = Describe("Integration Test", func() { Expect(dbd.Status.Storage.AllocationSets).To(HaveLen(1)) allocSet := &dbd.Status.Storage.AllocationSets[0] - servers.Spec.AllocationSets = make([]dwsv1alpha2.ServersSpecAllocationSet, 1) - servers.Spec.AllocationSets[0] = dwsv1alpha2.ServersSpecAllocationSet{ + servers.Spec.AllocationSets = make([]dwsv1alpha3.ServersSpecAllocationSet, 1) + servers.Spec.AllocationSets[0] = dwsv1alpha3.ServersSpecAllocationSet{ AllocationSize: allocSet.MinimumCapacity, Label: allocSet.Label, Storage: storage, @@ -679,12 +679,12 @@ var _ = Describe("Integration Test", func() { } else { // If lustre, allocate one node per allocation set Expect(len(nodeNames) >= len(dbd.Status.Storage.AllocationSets)).To(BeTrue()) - servers.Spec.AllocationSets = make([]dwsv1alpha2.ServersSpecAllocationSet, len(dbd.Status.Storage.AllocationSets)) + servers.Spec.AllocationSets = make([]dwsv1alpha3.ServersSpecAllocationSet, len(dbd.Status.Storage.AllocationSets)) for idx, allocset := range dbd.Status.Storage.AllocationSets { - servers.Spec.AllocationSets[idx] = dwsv1alpha2.ServersSpecAllocationSet{ + servers.Spec.AllocationSets[idx] = dwsv1alpha3.ServersSpecAllocationSet{ AllocationSize: allocset.MinimumCapacity, Label: allocset.Label, - Storage: []dwsv1alpha2.ServersSpecStorage{ + Storage: []dwsv1alpha3.ServersSpecStorage{ { AllocationCount: 1, Name: nodeNames[idx], @@ -699,15 +699,15 @@ var _ = Describe("Integration Test", func() { By("Assigning computes") Expect(computes.Data).To(HaveLen(0)) - computes.Data = make([]dwsv1alpha2.ComputesData, 0, len(nodeNames)) + computes.Data = 
make([]dwsv1alpha3.ComputesData, 0, len(nodeNames)) for idx := range nodeNames { - computes.Data = append(computes.Data, dwsv1alpha2.ComputesData{Name: fmt.Sprintf("compute%d", idx*16)}) + computes.Data = append(computes.Data, dwsv1alpha3.ComputesData{Name: fmt.Sprintf("compute%d", idx*16)}) } Expect(k8sClient.Update(context.TODO(), computes)).To(Succeed()) /***************************** Setup *****************************/ - advanceStateAndCheckReady(dwsv1alpha2.StateSetup, workflow) + advanceStateAndCheckReady(dwsv1alpha3.StateSetup, workflow) By("Checking Setup state") switch storageDirective { @@ -723,21 +723,21 @@ var _ = Describe("Integration Test", func() { /**************************** Data In ****************************/ - advanceStateAndCheckReady(dwsv1alpha2.StateDataIn, workflow) + advanceStateAndCheckReady(dwsv1alpha3.StateDataIn, workflow) By("Checking Data In state") // TODO /**************************** Pre Run ****************************/ - advanceStateAndCheckReady(dwsv1alpha2.StatePreRun, workflow) + advanceStateAndCheckReady(dwsv1alpha3.StatePreRun, workflow) By("Checking Pre Run state") switch storageDirective { default: for _, dbdRef := range workflow.Status.DirectiveBreakdowns { - dbd := &dwsv1alpha2.DirectiveBreakdown{} + dbd := &dwsv1alpha3.DirectiveBreakdown{} Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: dbdRef.Name, Namespace: dbdRef.Namespace}, dbd)).To(Succeed()) By("Check for an NNF Access describing the computes") @@ -750,7 +750,7 @@ var _ = Describe("Integration Test", func() { Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(access), access)).To(Succeed()) Expect(access.ObjectMeta.OwnerReferences).To(ContainElement(ownerRef)) Expect(access.Spec).To(MatchFields(IgnoreExtras, Fields{ - "TeardownState": Equal(dwsv1alpha2.StatePostRun), + "TeardownState": Equal(dwsv1alpha3.StatePostRun), "DesiredState": Equal("mounted"), "Target": Equal("single"), })) @@ -764,9 +764,9 @@ var _ = Describe("Integration 
Test", func() { Expect(access.Spec.ClientReference).To(MatchFields(IgnoreExtras, Fields{ "Name": Equal(workflow.Name), "Namespace": Equal(workflow.Namespace), - "Kind": Equal(reflect.TypeOf(dwsv1alpha2.Computes{}).Name()), + "Kind": Equal(reflect.TypeOf(dwsv1alpha3.Computes{}).Name()), })) - computes := &dwsv1alpha2.Computes{ + computes := &dwsv1alpha3.Computes{ ObjectMeta: metav1.ObjectMeta{ Name: access.Spec.ClientReference.Name, Namespace: access.Spec.ClientReference.Namespace, @@ -795,7 +795,7 @@ var _ = Describe("Integration Test", func() { By("Checking for a Client Mount on each compute") for _, compute := range computes.Data { - clientMount := &dwsv1alpha2.ClientMount{ + clientMount := &dwsv1alpha3.ClientMount{ ObjectMeta: metav1.ObjectMeta{ Name: clientMountName(access), Namespace: compute.Name, @@ -803,8 +803,8 @@ var _ = Describe("Integration Test", func() { } Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(clientMount), clientMount)).To(Succeed()) Expect(clientMount.Status.Mounts).To(HaveLen(1)) - Expect(clientMount.Labels[dwsv1alpha2.WorkflowNameLabel]).To(Equal(workflow.Name)) - Expect(clientMount.Labels[dwsv1alpha2.WorkflowNamespaceLabel]).To(Equal(workflow.Namespace)) + Expect(clientMount.Labels[dwsv1alpha3.WorkflowNameLabel]).To(Equal(workflow.Name)) + Expect(clientMount.Labels[dwsv1alpha3.WorkflowNamespaceLabel]).To(Equal(workflow.Namespace)) Expect(clientMount.Status.Mounts[0].Ready).To(BeTrue()) } @@ -830,14 +830,14 @@ var _ = Describe("Integration Test", func() { /*************************** Post Run ****************************/ - advanceStateAndCheckReady(dwsv1alpha2.StatePostRun, workflow) + advanceStateAndCheckReady(dwsv1alpha3.StatePostRun, workflow) By("Checking Post Run state") switch storageDirective { default: for _, dbdRef := range workflow.Status.DirectiveBreakdowns { - dbd := &dwsv1alpha2.DirectiveBreakdown{} + dbd := &dwsv1alpha3.DirectiveBreakdown{} Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: 
dbdRef.Name, Namespace: dbdRef.Namespace}, dbd)).To(Succeed()) By("Check that NNF Access describing computes is not present") @@ -877,27 +877,27 @@ var _ = Describe("Integration Test", func() { /**************************** Data Out ****************************/ - advanceStateAndCheckReady(dwsv1alpha2.StateDataOut, workflow) + advanceStateAndCheckReady(dwsv1alpha3.StateDataOut, workflow) By("Checking Data Out state") // TODO /**************************** Teardown ****************************/ - advanceStateAndCheckReady(dwsv1alpha2.StateTeardown, workflow) + advanceStateAndCheckReady(dwsv1alpha3.StateTeardown, workflow) By("Checking Teardown state") for _, dbdRef := range workflow.Status.DirectiveBreakdowns { - dbd := &dwsv1alpha2.DirectiveBreakdown{} + dbd := &dwsv1alpha3.DirectiveBreakdown{} Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: dbdRef.Name, Namespace: dbdRef.Namespace}, dbd)).To(Succeed()) switch storageDirective { case "create_persistent": By("Check that the servers resource still exists") - servers := &dwsv1alpha2.Servers{} - Expect(dbd.Status.Storage.Reference.Kind).To(Equal(reflect.TypeOf(dwsv1alpha2.Servers{}).Name())) + servers := &dwsv1alpha3.Servers{} + Expect(dbd.Status.Storage.Reference.Kind).To(Equal(reflect.TypeOf(dwsv1alpha3.Servers{}).Name())) Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: dbd.Status.Storage.Reference.Name, Namespace: dbd.Status.Storage.Reference.Namespace}, servers)).To(Succeed()) By("NNFStorages for persistentStorageInstance should NOT be deleted") @@ -911,8 +911,8 @@ var _ = Describe("Integration Test", func() { case "jobdw": By("Check that the servers resource still exists") - servers := &dwsv1alpha2.Servers{} - Expect(dbd.Status.Storage.Reference.Kind).To(Equal(reflect.TypeOf(dwsv1alpha2.Servers{}).Name())) + servers := &dwsv1alpha3.Servers{} + Expect(dbd.Status.Storage.Reference.Kind).To(Equal(reflect.TypeOf(dwsv1alpha3.Servers{}).Name())) Expect(k8sClient.Get(context.TODO(), 
types.NamespacedName{Name: dbd.Status.Storage.Reference.Name, Namespace: dbd.Status.Storage.Reference.Namespace}, servers)).To(Succeed()) By("NNFStorages associated with jobdw should be deleted") @@ -930,13 +930,13 @@ var _ = Describe("Integration Test", func() { It("Testing Lifecycle of workflow with no #DW directives", func() { const workflowName = "no-directives" - workflow = &dwsv1alpha2.Workflow{ + workflow = &dwsv1alpha3.Workflow{ ObjectMeta: metav1.ObjectMeta{ Name: workflowName, Namespace: corev1.NamespaceDefault, }, - Spec: dwsv1alpha2.WorkflowSpec{ - DesiredState: dwsv1alpha2.StateProposal, + Spec: dwsv1alpha3.WorkflowSpec{ + DesiredState: dwsv1alpha3.StateProposal, WLMID: "854973", DWDirectives: []string{}, // Empty }, @@ -951,11 +951,11 @@ var _ = Describe("Integration Test", func() { By("Verifying proposal state and ready") Eventually(func() bool { Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(workflow), workflow)).To(Succeed()) - return workflow.Status.State == dwsv1alpha2.StateProposal && workflow.Status.Ready + return workflow.Status.State == dwsv1alpha3.StateProposal && workflow.Status.Ready }).Should(BeTrue()) By("Verifying it has no directiveBreakdowns") - directiveBreakdown := &dwsv1alpha2.DirectiveBreakdown{ + directiveBreakdown := &dwsv1alpha3.DirectiveBreakdown{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%d", workflowName, 0), Namespace: corev1.NamespaceDefault, @@ -964,7 +964,7 @@ var _ = Describe("Integration Test", func() { Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(directiveBreakdown), directiveBreakdown)).ShouldNot(Succeed()) By("Verifying it has no servers") - servers := &dwsv1alpha2.Servers{ + servers := &dwsv1alpha3.Servers{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%d", workflowName, 0), Namespace: corev1.NamespaceDefault, @@ -974,8 +974,8 @@ var _ = Describe("Integration Test", func() { // Store ownership reference to workflow - this is checked for many of the created 
objects ownerRef := metav1.OwnerReference{ - Kind: reflect.TypeOf(dwsv1alpha2.Workflow{}).Name(), - APIVersion: dwsv1alpha2.GroupVersion.String(), + Kind: reflect.TypeOf(dwsv1alpha3.Workflow{}).Name(), + APIVersion: dwsv1alpha3.GroupVersion.String(), UID: workflow.GetUID(), Name: workflow.GetName(), Controller: &controller, @@ -983,18 +983,18 @@ var _ = Describe("Integration Test", func() { } By("Verifying it has Computes resource") - computes := &dwsv1alpha2.Computes{ + computes := &dwsv1alpha3.Computes{ ObjectMeta: metav1.ObjectMeta{ Name: workflow.Status.Computes.Name, Namespace: workflow.Status.Computes.Namespace, }, } - Expect(workflow.Status.Computes.Kind).To(Equal(reflect.TypeOf(dwsv1alpha2.Computes{}).Name())) + Expect(workflow.Status.Computes.Kind).To(Equal(reflect.TypeOf(dwsv1alpha3.Computes{}).Name())) Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(computes), computes)).To(Succeed()) Expect(computes.ObjectMeta.OwnerReferences).To(ContainElement(ownerRef)) /***************************** Teardown *****************************/ - advanceStateAndCheckReady(dwsv1alpha2.StateTeardown, workflow) + advanceStateAndCheckReady(dwsv1alpha3.StateTeardown, workflow) }) }) @@ -1016,13 +1016,13 @@ var _ = Describe("Integration Test", func() { k8sClient.Create(context.TODO(), ns) wfid := uuid.NewString()[0:8] - workflow = &dwsv1alpha2.Workflow{ + workflow = &dwsv1alpha3.Workflow{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("gfs2-%s", wfid), Namespace: corev1.NamespaceDefault, }, - Spec: dwsv1alpha2.WorkflowSpec{ - DesiredState: dwsv1alpha2.StateProposal, + Spec: dwsv1alpha3.WorkflowSpec{ + DesiredState: dwsv1alpha3.StateProposal, JobID: intstr.FromString("a job id"), WLMID: "Test WLMID", }, @@ -1071,30 +1071,30 @@ var _ = Describe("Integration Test", func() { By("Checking for proposal state and ready") Eventually(func() bool { Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(workflow), workflow)).To(Succeed()) - return 
workflow.Status.State == dwsv1alpha2.StateProposal && workflow.Status.Ready + return workflow.Status.State == dwsv1alpha3.StateProposal && workflow.Status.Ready }).Should(BeTrue()) for _, dbdRef := range workflow.Status.DirectiveBreakdowns { By("Checking DW Directive Breakdown") - dbd := &dwsv1alpha2.DirectiveBreakdown{} + dbd := &dwsv1alpha3.DirectiveBreakdown{} Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: dbdRef.Name, Namespace: dbdRef.Namespace}, dbd)).To(Succeed()) Expect(dbd.Status.Storage.AllocationSets).To(HaveLen(1)) allocSet := &dbd.Status.Storage.AllocationSets[0] By("Assigning storage") - servers := &dwsv1alpha2.Servers{} + servers := &dwsv1alpha3.Servers{} Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: dbd.Status.Storage.Reference.Name, Namespace: dbd.Status.Storage.Reference.Namespace}, servers)).To(Succeed()) - storage := make([]dwsv1alpha2.ServersSpecStorage, 0, len(nodeNames)) + storage := make([]dwsv1alpha3.ServersSpecStorage, 0, len(nodeNames)) for _, nodeName := range nodeNames { - storage = append(storage, dwsv1alpha2.ServersSpecStorage{ + storage = append(storage, dwsv1alpha3.ServersSpecStorage{ AllocationCount: 1, Name: nodeName, }) } - servers.Spec.AllocationSets = []dwsv1alpha2.ServersSpecAllocationSet{ + servers.Spec.AllocationSets = []dwsv1alpha3.ServersSpecAllocationSet{ { AllocationSize: allocSet.MinimumCapacity, Label: allocSet.Label, @@ -1107,18 +1107,18 @@ var _ = Describe("Integration Test", func() { /***************************** Setup *****************************/ - advanceStateAndCheckReady(dwsv1alpha2.StateSetup, workflow) + advanceStateAndCheckReady(dwsv1alpha3.StateSetup, workflow) /**************************** Data In ****************************/ - advanceStateAndCheckReady(dwsv1alpha2.StateDataIn, workflow) + advanceStateAndCheckReady(dwsv1alpha3.StateDataIn, workflow) }) AfterEach(func() { /**************************** Teardown ****************************/ - 
advanceStateAndCheckReady(dwsv1alpha2.StateTeardown, workflow) + advanceStateAndCheckReady(dwsv1alpha3.StateTeardown, workflow) Expect(k8sClient.Delete(context.TODO(), dmm)).To(Succeed()) Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present @@ -1148,7 +1148,7 @@ var _ = Describe("Integration Test", func() { Expect(k8sClient.Delete(context.TODO(), lustre)).To(Succeed()) }) - validateNnfAccessHasCorrectTeardownState := func(state dwsv1alpha2.WorkflowState) { + validateNnfAccessHasCorrectTeardownState := func(state dwsv1alpha3.WorkflowState) { Expect(workflow.Status.DirectiveBreakdowns).To(HaveLen(1)) access := &nnfv1alpha3.NnfAccess{ @@ -1198,14 +1198,14 @@ var _ = Describe("Integration Test", func() { }) It("Validates Workflow", func() { - Expect(workflow.Status.State).To(Equal(dwsv1alpha2.StateDataIn)) + Expect(workflow.Status.State).To(Equal(dwsv1alpha3.StateDataIn)) By("Check that NNF Access is created, with deletion in post-run") - validateNnfAccessHasCorrectTeardownState(dwsv1alpha2.StatePostRun) + validateNnfAccessHasCorrectTeardownState(dwsv1alpha3.StatePostRun) By("Advancing to post run, ensure NNF Access is deleted") - advanceStateAndCheckReady(dwsv1alpha2.StatePreRun, workflow) - advanceStateAndCheckReady(dwsv1alpha2.StatePostRun, workflow) + advanceStateAndCheckReady(dwsv1alpha3.StatePreRun, workflow) + advanceStateAndCheckReady(dwsv1alpha3.StatePostRun, workflow) validateNnfAccessIsNotFound() }) }) @@ -1221,20 +1221,20 @@ var _ = Describe("Integration Test", func() { }) It("Validates Workflow", func() { - Expect(workflow.Status.State).To(Equal(dwsv1alpha2.StateDataIn)) + Expect(workflow.Status.State).To(Equal(dwsv1alpha3.StateDataIn)) validateNnfAccessIsNotFound() - advanceStateAndCheckReady(dwsv1alpha2.StatePreRun, workflow) + advanceStateAndCheckReady(dwsv1alpha3.StatePreRun, workflow) By("Validate NNF Access is created, with deletion in data-out") - 
validateNnfAccessHasCorrectTeardownState(dwsv1alpha2.StateTeardown) + validateNnfAccessHasCorrectTeardownState(dwsv1alpha3.StateTeardown) By("Advancing to post run, ensure NNF Access is still set for deletion in data-out") - advanceStateAndCheckReady(dwsv1alpha2.StatePostRun, workflow) - validateNnfAccessHasCorrectTeardownState(dwsv1alpha2.StateTeardown) + advanceStateAndCheckReady(dwsv1alpha3.StatePostRun, workflow) + validateNnfAccessHasCorrectTeardownState(dwsv1alpha3.StateTeardown) By("Advancing to data-out, ensure NNF Access is deleted") - advanceStateAndCheckReady(dwsv1alpha2.StateDataOut, workflow) + advanceStateAndCheckReady(dwsv1alpha3.StateDataOut, workflow) validateNnfAccessIsNotFound() }) }) @@ -1249,16 +1249,16 @@ var _ = Describe("Integration Test", func() { }) It("Validates Workflow", func() { - Expect(workflow.Status.State).To(Equal(dwsv1alpha2.StateDataIn)) + Expect(workflow.Status.State).To(Equal(dwsv1alpha3.StateDataIn)) By("Validate NNF Access is created, with deletion in data-out") - validateNnfAccessHasCorrectTeardownState(dwsv1alpha2.StateTeardown) + validateNnfAccessHasCorrectTeardownState(dwsv1alpha3.StateTeardown) - advanceStateAndCheckReady(dwsv1alpha2.StatePreRun, workflow) - advanceStateAndCheckReady(dwsv1alpha2.StatePostRun, workflow) + advanceStateAndCheckReady(dwsv1alpha3.StatePreRun, workflow) + advanceStateAndCheckReady(dwsv1alpha3.StatePostRun, workflow) By("Advancing to data-out, ensure NNF Access is deleted") - advanceStateAndCheckReady(dwsv1alpha2.StateDataOut, workflow) + advanceStateAndCheckReady(dwsv1alpha3.StateDataOut, workflow) validateNnfAccessIsNotFound() }) @@ -1272,9 +1272,9 @@ var _ = Describe("Integration Test", func() { }) It("Validates error propgates to workflow", func() { - Expect(workflow.Status.State).To(Equal(dwsv1alpha2.StateDataIn)) + Expect(workflow.Status.State).To(Equal(dwsv1alpha3.StateDataIn)) - advanceStateAndCheckReady(dwsv1alpha2.StatePreRun, workflow) + 
advanceStateAndCheckReady(dwsv1alpha3.StatePreRun, workflow) By("Injecting an error in the data movement resource") @@ -1284,9 +1284,9 @@ var _ = Describe("Integration Test", func() { Namespace: nnfv1alpha3.DataMovementNamespace, }, } - dwsv1alpha2.AddWorkflowLabels(dm, workflow) - dwsv1alpha2.AddOwnerLabels(dm, workflow) - nnfv1alpha3.AddDataMovementTeardownStateLabel(dm, dwsv1alpha2.StatePostRun) + dwsv1alpha3.AddWorkflowLabels(dm, workflow) + dwsv1alpha3.AddOwnerLabels(dm, workflow) + nnfv1alpha3.AddDataMovementTeardownStateLabel(dm, dwsv1alpha3.StatePostRun) Expect(k8sClient.Create(context.TODO(), dm)).To(Succeed()) @@ -1300,16 +1300,16 @@ var _ = Describe("Integration Test", func() { Expect(k8sClient.Status().Update(context.TODO(), dm)).To(Succeed()) By("Advancing to post-run") - advanceState(dwsv1alpha2.StatePostRun, workflow, 1) + advanceState(dwsv1alpha3.StatePostRun, workflow, 1) By("Checking the driver has an error present") - Eventually(func() *dwsv1alpha2.WorkflowDriverStatus { + Eventually(func() *dwsv1alpha3.WorkflowDriverStatus { Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(workflow), workflow)).To(Succeed()) driverID := os.Getenv("DWS_DRIVER_ID") for _, driver := range workflow.Status.Drivers { if driver.DriverID == driverID { - if driver.Status == dwsv1alpha2.StatusError { + if driver.Status == dwsv1alpha3.StatusError { return &driver } } @@ -1332,13 +1332,13 @@ var _ = Describe("Integration Test", func() { Expect(containerProfile).ToNot(BeNil()) wfName := "container-test" - workflow = &dwsv1alpha2.Workflow{ + workflow = &dwsv1alpha3.Workflow{ ObjectMeta: metav1.ObjectMeta{ Name: wfName, Namespace: corev1.NamespaceDefault, }, - Spec: dwsv1alpha2.WorkflowSpec{ - DesiredState: dwsv1alpha2.StateProposal, + Spec: dwsv1alpha3.WorkflowSpec{ + DesiredState: dwsv1alpha3.StateProposal, JobID: intstr.FromString("job 1234"), WLMID: "Test WLMID", DWDirectives: []string{ @@ -1371,7 +1371,7 @@ var _ = Describe("Integration Test", func() { 
It("it should target the local NNF nodes for the container jobs", func() { By("assigning the container directive to compute nodes") - computes := &dwsv1alpha2.Computes{ + computes := &dwsv1alpha3.Computes{ ObjectMeta: metav1.ObjectMeta{ Name: workflow.Status.Computes.Name, Namespace: workflow.Status.Computes.Namespace, @@ -1381,7 +1381,7 @@ var _ = Describe("Integration Test", func() { // Add 2 computes across 2 NNF nodes. For containers, this means we should see 2 jobs // These computes are defined in the SystemConfiguration Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(computes), computes)).To(Succeed()) - data := []dwsv1alpha2.ComputesData{ + data := []dwsv1alpha3.ComputesData{ {Name: "compute0"}, {Name: "compute32"}, } @@ -1394,7 +1394,7 @@ var _ = Describe("Integration Test", func() { advanceState("PreRun", workflow, 1) By("verifying the number of targeted NNF nodes for the container jobs") - matchLabels := dwsv1alpha2.MatchingWorkflow(workflow) + matchLabels := dwsv1alpha3.MatchingWorkflow(workflow) matchLabels[nnfv1alpha3.DirectiveIndexLabel] = "0" jobList := &batchv1.JobList{} @@ -1425,8 +1425,8 @@ var _ = Describe("Integration Test", func() { profileMgsNid string - dbd *dwsv1alpha2.DirectiveBreakdown - dbdServer *dwsv1alpha2.Servers + dbd *dwsv1alpha3.DirectiveBreakdown + dbdServer *dwsv1alpha3.Servers externalMgsProfileName string combinedMgtMdtProfileName string @@ -1435,8 +1435,8 @@ var _ = Describe("Integration Test", func() { BeforeEach(func() { profileMgsNid = "profile-mgs@tcp" - dbd = &dwsv1alpha2.DirectiveBreakdown{} - dbdServer = &dwsv1alpha2.Servers{} + dbd = &dwsv1alpha3.DirectiveBreakdown{} + dbdServer = &dwsv1alpha3.Servers{} externalMgsProfileName = "has-external-mgs" combinedMgtMdtProfileName = "has-combined-mgtmdt" @@ -1480,13 +1480,13 @@ var _ = Describe("Integration Test", func() { BeforeEach(func() { By("BeforeEach setup a basic workflow resource") wfid := uuid.NewString()[0:8] - workflow = &dwsv1alpha2.Workflow{ + 
workflow = &dwsv1alpha3.Workflow{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("profile-%s", wfid), Namespace: corev1.NamespaceDefault, }, - Spec: dwsv1alpha2.WorkflowSpec{ - DesiredState: dwsv1alpha2.StateProposal, + Spec: dwsv1alpha3.WorkflowSpec{ + DesiredState: dwsv1alpha3.StateProposal, JobID: intstr.FromString("some job 234"), WLMID: "Test WLMID", }, @@ -1495,7 +1495,7 @@ var _ = Describe("Integration Test", func() { AfterEach(func() { By("AfterEach advance the workflow state to teardown") - advanceStateAndCheckReady(dwsv1alpha2.StateTeardown, workflow) + advanceStateAndCheckReady(dwsv1alpha3.StateTeardown, workflow) }) // Create the workflow for the Ginkgo specs. @@ -1507,7 +1507,7 @@ var _ = Describe("Integration Test", func() { Expect(k8sClient.Create(context.TODO(), workflow)).To(Succeed()) Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(workflow), workflow)).To(Succeed()) - if (workflow.Status.Ready == true) && (workflow.Status.State == dwsv1alpha2.StateProposal) { + if (workflow.Status.Ready == true) && (workflow.Status.State == dwsv1alpha3.StateProposal) { return nil } return fmt.Errorf("ready state not achieved") @@ -1527,11 +1527,11 @@ var _ = Describe("Integration Test", func() { }) assignStorageForMDTOST := func() { - dbdServer.Spec.AllocationSets = []dwsv1alpha2.ServersSpecAllocationSet{ + dbdServer.Spec.AllocationSets = []dwsv1alpha3.ServersSpecAllocationSet{ { AllocationSize: 1, Label: "mdt", - Storage: []dwsv1alpha2.ServersSpecStorage{ + Storage: []dwsv1alpha3.ServersSpecStorage{ {AllocationCount: 1, Name: "rabbit-test-node-0"}, }, @@ -1539,7 +1539,7 @@ var _ = Describe("Integration Test", func() { { AllocationSize: 1, Label: "ost", - Storage: []dwsv1alpha2.ServersSpecStorage{ + Storage: []dwsv1alpha3.ServersSpecStorage{ {AllocationCount: 1, Name: "rabbit-test-node-1"}, }, @@ -1563,7 +1563,7 @@ var _ = Describe("Integration Test", func() { assignStorageForMDTOST() 
Expect(k8sClient.Update(context.TODO(), dbdServer)).To(Succeed()) By(fmt.Sprintf("Verify that the MGS NID %s is used by the filesystem", getNidVia)) - advanceStateAndCheckReady(dwsv1alpha2.StateSetup, workflow) + advanceStateAndCheckReady(dwsv1alpha3.StateSetup, workflow) // The NnfStorage's name matches the Server resource's name. nnfstorage := &nnfv1alpha3.NnfStorage{} Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dbdServer), nnfstorage)).To(Succeed()) @@ -1614,13 +1614,13 @@ var _ = Describe("Integration Test", func() { // in its BeforeEach() clause. BeforeEach(func() { wfid := uuid.NewString()[0:8] - workflow = &dwsv1alpha2.Workflow{ + workflow = &dwsv1alpha3.Workflow{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("persistent-%s", wfid), Namespace: corev1.NamespaceDefault, }, - Spec: dwsv1alpha2.WorkflowSpec{ - DesiredState: dwsv1alpha2.StateProposal, + Spec: dwsv1alpha3.WorkflowSpec{ + DesiredState: dwsv1alpha3.StateProposal, JobID: intstr.FromString("job 2222"), WLMID: "Test WLMID", }, @@ -1629,7 +1629,7 @@ var _ = Describe("Integration Test", func() { AfterEach(func() { By("AfterEach advance the workflow state to teardown") - advanceStateAndCheckReady(dwsv1alpha2.StateTeardown, workflow) + advanceStateAndCheckReady(dwsv1alpha3.StateTeardown, workflow) }) // Create the workflow for the Ginkgo specs. 
@@ -1642,15 +1642,15 @@ var _ = Describe("Integration Test", func() { // verifyErrorIsPresent checks that the workflow has stopped due to a driver error verifyErrorIsPresent := func() { By("Checking the driver has an error present") - Eventually(func(g Gomega) *dwsv1alpha2.WorkflowDriverStatus { + Eventually(func(g Gomega) *dwsv1alpha3.WorkflowDriverStatus { g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(workflow), workflow)).To(Succeed()) g.Expect(workflow.Status.Ready == false) - g.Expect(workflow.Status.State == dwsv1alpha2.StateProposal) + g.Expect(workflow.Status.State == dwsv1alpha3.StateProposal) driverID := os.Getenv("DWS_DRIVER_ID") for _, driver := range workflow.Status.Drivers { if driver.DriverID == driverID { - if driver.Status == dwsv1alpha2.StatusError { + if driver.Status == dwsv1alpha3.StatusError { return &driver } } diff --git a/internal/controller/nnf_access_controller.go b/internal/controller/nnf_access_controller.go index 426b793e1..d916708ec 100644 --- a/internal/controller/nnf_access_controller.go +++ b/internal/controller/nnf_access_controller.go @@ -45,7 +45,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" "github.com/DataWorkflowServices/dws/utils/updater" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" @@ -56,7 +56,7 @@ type NnfAccessReconciler struct { client.Client Log logr.Logger Scheme *kruntime.Scheme - ChildObjects []dwsv1alpha2.ObjectList + ChildObjects []dwsv1alpha3.ObjectList } const ( @@ -118,7 +118,7 @@ func (r *NnfAccessReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return ctrl.Result{}, nil } - deleteStatus, err := dwsv1alpha2.DeleteChildren(ctx, r.Client, r.ChildObjects, access) + deleteStatus, err := dwsv1alpha3.DeleteChildren(ctx, 
r.Client, r.ChildObjects, access) if err != nil { return ctrl.Result{}, err } @@ -177,12 +177,12 @@ func (r *NnfAccessReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( if access.Status.State == "mounted" { result, err = r.mount(ctx, access, clientList, storageMapping) if err != nil { - return ctrl.Result{}, dwsv1alpha2.NewResourceError("").WithError(err).WithUserMessage("unable to mount file system on client nodes") + return ctrl.Result{}, dwsv1alpha3.NewResourceError("").WithError(err).WithUserMessage("unable to mount file system on client nodes") } } else { result, err = r.unmount(ctx, access, clientList, storageMapping) if err != nil { - return ctrl.Result{}, dwsv1alpha2.NewResourceError("").WithError(err).WithUserMessage("unable to unmount file system from client nodes") + return ctrl.Result{}, dwsv1alpha3.NewResourceError("").WithError(err).WithUserMessage("unable to unmount file system from client nodes") } } @@ -200,13 +200,13 @@ func (r *NnfAccessReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return ctrl.Result{}, nil } -func (r *NnfAccessReconciler) mount(ctx context.Context, access *nnfv1alpha3.NnfAccess, clientList []string, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) (*ctrl.Result, error) { +func (r *NnfAccessReconciler) mount(ctx context.Context, access *nnfv1alpha3.NnfAccess, clientList []string, storageMapping map[string][]dwsv1alpha3.ClientMountInfo) (*ctrl.Result, error) { // Lock the NnfStorage by adding an annotation with the name/namespace for this // NnfAccess. This is used for non-clustered file systems that can only be mounted // from a single host. 
wait, err := r.lockStorage(ctx, access) if err != nil { - return nil, dwsv1alpha2.NewResourceError("unable to lock storage").WithError(err) + return nil, dwsv1alpha3.NewResourceError("unable to lock storage").WithError(err) } if wait { @@ -220,13 +220,13 @@ func (r *NnfAccessReconciler) mount(ctx context.Context, access *nnfv1alpha3.Nnf return &ctrl.Result{}, nil } - return nil, dwsv1alpha2.NewResourceError("unable to add endpoints to NnfNodeStorage").WithError(err) + return nil, dwsv1alpha3.NewResourceError("unable to add endpoints to NnfNodeStorage").WithError(err) } // Wait for all the devices to be made available on the correct nodes ready, err := r.getBlockStorageAccessStatus(ctx, access, storageMapping) if err != nil { - return nil, dwsv1alpha2.NewResourceError("unable to check endpoints for NnfNodeStorage").WithError(err) + return nil, dwsv1alpha3.NewResourceError("unable to check endpoints for NnfNodeStorage").WithError(err) } if ready == false { @@ -240,13 +240,13 @@ func (r *NnfAccessReconciler) mount(ctx context.Context, access *nnfv1alpha3.Nnf return &ctrl.Result{}, nil } - return nil, dwsv1alpha2.NewResourceError("unable to create ClientMount resources").WithError(err) + return nil, dwsv1alpha3.NewResourceError("unable to create ClientMount resources").WithError(err) } // Aggregate the status from all the ClientMount resources ready, err = r.getClientMountStatus(ctx, access, clientList) if err != nil { - return nil, dwsv1alpha2.NewResourceError("unable to check ClientMount status").WithError(err) + return nil, dwsv1alpha3.NewResourceError("unable to check ClientMount status").WithError(err) } // Wait for all of the ClientMounts to be ready @@ -257,17 +257,17 @@ func (r *NnfAccessReconciler) mount(ctx context.Context, access *nnfv1alpha3.Nnf return nil, nil } -func (r *NnfAccessReconciler) unmount(ctx context.Context, access *nnfv1alpha3.NnfAccess, clientList []string, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) (*ctrl.Result, error) { 
+func (r *NnfAccessReconciler) unmount(ctx context.Context, access *nnfv1alpha3.NnfAccess, clientList []string, storageMapping map[string][]dwsv1alpha3.ClientMountInfo) (*ctrl.Result, error) { // Update client mounts to trigger unmount operation err := r.manageClientMounts(ctx, access, storageMapping) if err != nil { - return nil, dwsv1alpha2.NewResourceError("unable to update ClientMount resources").WithError(err) + return nil, dwsv1alpha3.NewResourceError("unable to update ClientMount resources").WithError(err) } // Aggregate the status from all the ClientMount resources ready, err := r.getClientMountStatus(ctx, access, clientList) if err != nil { - return nil, dwsv1alpha2.NewResourceError("unable to get ClientMount status").WithError(err) + return nil, dwsv1alpha3.NewResourceError("unable to get ClientMount status").WithError(err) } // Wait for all of the ClientMounts to be ready @@ -277,12 +277,12 @@ func (r *NnfAccessReconciler) unmount(ctx context.Context, access *nnfv1alpha3.N err = r.removeBlockStorageAccess(ctx, access, storageMapping) if err != nil { - return nil, dwsv1alpha2.NewResourceError("unable to remove NnfNodeStorage endpoints").WithError(err) + return nil, dwsv1alpha3.NewResourceError("unable to remove NnfNodeStorage endpoints").WithError(err) } // Unlock the NnfStorage so it can be used by another NnfAccess if err = r.unlockStorage(ctx, access); err != nil { - return nil, dwsv1alpha2.NewResourceError("unable to unlock storage").WithError(err) + return nil, dwsv1alpha3.NewResourceError("unable to unlock storage").WithError(err) } return nil, nil @@ -405,9 +405,9 @@ func (r *NnfAccessReconciler) getClientList(ctx context.Context, access *nnfv1al // getClientListFromClientReference returns a list of client nodes names from the Computes resource func (r *NnfAccessReconciler) getClientListFromClientReference(ctx context.Context, access *nnfv1alpha3.NnfAccess) ([]string, error) { - computes := &dwsv1alpha2.Computes{} + computes := 
&dwsv1alpha3.Computes{} - if access.Spec.ClientReference.Kind != reflect.TypeOf(dwsv1alpha2.Computes{}).Name() { + if access.Spec.ClientReference.Kind != reflect.TypeOf(dwsv1alpha3.Computes{}).Name() { return nil, fmt.Errorf("Invalid ClientReference kind %s", access.Spec.ClientReference.Kind) } @@ -463,7 +463,7 @@ func (r *NnfAccessReconciler) getClientListFromStorageReference(ctx context.Cont } // mapClientStorage returns a map of the clients with a list of mounts to make. This picks a device for each client -func (r *NnfAccessReconciler) mapClientStorage(ctx context.Context, access *nnfv1alpha3.NnfAccess, clients []string) (map[string][]dwsv1alpha2.ClientMountInfo, error) { +func (r *NnfAccessReconciler) mapClientStorage(ctx context.Context, access *nnfv1alpha3.NnfAccess, clients []string) (map[string][]dwsv1alpha3.ClientMountInfo, error) { nnfStorage := &nnfv1alpha3.NnfStorage{} if access.Spec.StorageReference.Kind != reflect.TypeOf(nnfv1alpha3.NnfStorage{}).Name() { @@ -481,7 +481,7 @@ func (r *NnfAccessReconciler) mapClientStorage(ctx context.Context, access *nnfv // Call a helper function depending on the storage type for i := range nnfStorage.Spec.AllocationSets { - var storageMapping map[string][]dwsv1alpha2.ClientMountInfo + var storageMapping map[string][]dwsv1alpha3.ClientMountInfo var err error if nnfStorage.Spec.FileSystemType == "lustre" { @@ -504,21 +504,21 @@ func (r *NnfAccessReconciler) mapClientStorage(ctx context.Context, access *nnfv // mapClientNetworkStorage provides the Lustre MGS address information for the clients. 
All clients get the same // mount information -func (r *NnfAccessReconciler) mapClientNetworkStorage(ctx context.Context, access *nnfv1alpha3.NnfAccess, clients []string, nnfStorage *nnfv1alpha3.NnfStorage, setIndex int) (map[string][]dwsv1alpha2.ClientMountInfo, error) { - storageMapping := make(map[string][]dwsv1alpha2.ClientMountInfo) +func (r *NnfAccessReconciler) mapClientNetworkStorage(ctx context.Context, access *nnfv1alpha3.NnfAccess, clients []string, nnfStorage *nnfv1alpha3.NnfStorage, setIndex int) (map[string][]dwsv1alpha3.ClientMountInfo, error) { + storageMapping := make(map[string][]dwsv1alpha3.ClientMountInfo) for _, client := range clients { - mountInfo := dwsv1alpha2.ClientMountInfo{} + mountInfo := dwsv1alpha3.ClientMountInfo{} mountInfo.Type = nnfStorage.Spec.FileSystemType mountInfo.TargetType = "directory" mountInfo.MountPath = access.Spec.MountPath - mountInfo.Device.Type = dwsv1alpha2.ClientMountDeviceTypeLustre - mountInfo.Device.Lustre = &dwsv1alpha2.ClientMountDeviceLustre{} + mountInfo.Device.Type = dwsv1alpha3.ClientMountDeviceTypeLustre + mountInfo.Device.Lustre = &dwsv1alpha3.ClientMountDeviceLustre{} mountInfo.Device.Lustre.FileSystemName = nnfStorage.Status.FileSystemName mountInfo.Device.Lustre.MgsAddresses = nnfStorage.Status.MgsAddress // Make it easy for the nnf-dm daemon to find the NnfStorage. - mountInfo.Device.DeviceReference = &dwsv1alpha2.ClientMountDeviceReference{ + mountInfo.Device.DeviceReference = &dwsv1alpha3.ClientMountDeviceReference{ ObjectReference: access.Spec.StorageReference, } @@ -535,7 +535,7 @@ func (r *NnfAccessReconciler) mapClientNetworkStorage(ctx context.Context, acces // mapClientLocalStorage picks storage device(s) for each client to access based on locality information // from the (DWS) Storage resources. 
-func (r *NnfAccessReconciler) mapClientLocalStorage(ctx context.Context, access *nnfv1alpha3.NnfAccess, clients []string, nnfStorage *nnfv1alpha3.NnfStorage, setIndex int) (map[string][]dwsv1alpha2.ClientMountInfo, error) { +func (r *NnfAccessReconciler) mapClientLocalStorage(ctx context.Context, access *nnfv1alpha3.NnfAccess, clients []string, nnfStorage *nnfv1alpha3.NnfStorage, setIndex int) (map[string][]dwsv1alpha3.ClientMountInfo, error) { allocationSetSpec := nnfStorage.Spec.AllocationSets[setIndex] // Use information from the NnfStorage resource to determine how many allocations @@ -561,12 +561,12 @@ func (r *NnfAccessReconciler) mapClientLocalStorage(ctx context.Context, access } // existingStorage is a map of Rabbits nodes and which storage they have - existingStorage := make(map[string][]dwsv1alpha2.ClientMountInfo) + existingStorage := make(map[string][]dwsv1alpha3.ClientMountInfo) // Read each NnfNodeStorage resource and find the NVMe information for each // allocation. for nodeName, storageCount := range storageCountMap { - matchLabels := dwsv1alpha2.MatchingOwner(nnfStorage) + matchLabels := dwsv1alpha3.MatchingOwner(nnfStorage) matchLabels[nnfv1alpha3.AllocationSetLabel] = allocationSetSpec.Name listOptions := []client.ListOption{ @@ -582,7 +582,7 @@ func (r *NnfAccessReconciler) mapClientLocalStorage(ctx context.Context, access // Check that the correct number of NnfNodeStorage resources were found for this // Rabbit. if len(nnfNodeStorageList.Items) != storageCount.instanceCount { - return nil, dwsv1alpha2.NewResourceError("incorrect number of NnfNodeStorages. found %d. Needed %d.", len(nnfNodeStorageList.Items), storageCount.instanceCount).WithMajor() + return nil, dwsv1alpha3.NewResourceError("incorrect number of NnfNodeStorages. found %d. 
Needed %d.", len(nnfNodeStorageList.Items), storageCount.instanceCount).WithMajor() } for _, nnfNodeStorage := range nnfNodeStorageList.Items { @@ -590,13 +590,13 @@ func (r *NnfAccessReconciler) mapClientLocalStorage(ctx context.Context, access // Loop through each allocation to pull out the device information and build the // mount information for i := 0; i < nnfNodeStorage.Spec.Count; i++ { - mountInfo := dwsv1alpha2.ClientMountInfo{} + mountInfo := dwsv1alpha3.ClientMountInfo{} // Set the DeviceReference to the NnfNodeStorage allocation regardless of whether we're mounting on // the Rabbit or the compute node. The compute node ClientMount device type will not be set to "reference", // so clientmountd will not look at the DeviceReference struct. The DeviceReference information is used by // the data movement code to match up mounts between the Rabbit and compute node. - mountInfo.Device.DeviceReference = &dwsv1alpha2.ClientMountDeviceReference{} + mountInfo.Device.DeviceReference = &dwsv1alpha3.ClientMountDeviceReference{} mountInfo.Device.DeviceReference.ObjectReference.Kind = reflect.TypeOf(nnfv1alpha3.NnfNodeStorage{}).Name() mountInfo.Device.DeviceReference.ObjectReference.Name = nnfNodeStorage.Name mountInfo.Device.DeviceReference.ObjectReference.Namespace = nnfNodeStorage.Namespace @@ -624,14 +624,14 @@ func (r *NnfAccessReconciler) mapClientLocalStorage(ctx context.Context, access if access.Spec.ClientReference == (corev1.ObjectReference{}) { indexMountDir := getIndexMountDir(nnfNodeStorage.Namespace, i) mountInfo.MountPath = filepath.Join(access.Spec.MountPathPrefix, indexMountDir) - mountInfo.Device.Type = dwsv1alpha2.ClientMountDeviceTypeReference + mountInfo.Device.Type = dwsv1alpha3.ClientMountDeviceTypeReference } else { mountInfo.MountPath = access.Spec.MountPath - mountInfo.Device.Type = dwsv1alpha2.ClientMountDeviceTypeLVM - mountInfo.Device.LVM = &dwsv1alpha2.ClientMountDeviceLVM{} + mountInfo.Device.Type = dwsv1alpha3.ClientMountDeviceTypeLVM + 
mountInfo.Device.LVM = &dwsv1alpha3.ClientMountDeviceLVM{} mountInfo.Device.LVM.VolumeGroup = nnfNodeStorage.Status.Allocations[i].VolumeGroup mountInfo.Device.LVM.LogicalVolume = nnfNodeStorage.Status.Allocations[i].LogicalVolume - mountInfo.Device.LVM.DeviceType = dwsv1alpha2.ClientMountLVMDeviceTypeNVMe + mountInfo.Device.LVM.DeviceType = dwsv1alpha3.ClientMountLVMDeviceTypeNVMe } existingStorage[nnfNodeStorage.Namespace] = append(existingStorage[nnfNodeStorage.Namespace], mountInfo) @@ -641,9 +641,9 @@ func (r *NnfAccessReconciler) mapClientLocalStorage(ctx context.Context, access // storageMapping is a map of clients and a list of mounts to perform. It is initialized // with an empty list of mounts for each client - storageMapping := make(map[string][]dwsv1alpha2.ClientMountInfo) + storageMapping := make(map[string][]dwsv1alpha3.ClientMountInfo) for _, client := range clients { - storageMapping[client] = []dwsv1alpha2.ClientMountInfo{} + storageMapping[client] = []dwsv1alpha3.ClientMountInfo{} } // Loop through each Rabbit node in the existingStorage map, and find a client for @@ -655,7 +655,7 @@ func (r *NnfAccessReconciler) mapClientLocalStorage(ctx context.Context, access Namespace: "default", } - storage := &dwsv1alpha2.Storage{} + storage := &dwsv1alpha3.Storage{} err := r.Get(ctx, namespacedName, storage) if err != nil { return nil, err @@ -680,14 +680,14 @@ func (r *NnfAccessReconciler) mapClientLocalStorage(ctx context.Context, access } if len(existingStorage[storageName]) == 0 { - return nil, dwsv1alpha2.NewResourceError("").WithUserMessage("invalid matching between clients and storage. Too many clients for storage").WithWLM().WithFatal() + return nil, dwsv1alpha3.NewResourceError("").WithUserMessage("invalid matching between clients and storage. 
Too many clients for storage").WithWLM().WithFatal() } // If target==all, then the client wants to access all the storage it can see switch access.Spec.Target { case "all": storageMapping[client] = append(storageMapping[client], existingStorage[storageName]...) - existingStorage[storageName] = []dwsv1alpha2.ClientMountInfo{} + existingStorage[storageName] = []dwsv1alpha3.ClientMountInfo{} case "single": storageMapping[client] = append(storageMapping[client], existingStorage[storageName][0]) existingStorage[storageName] = existingStorage[storageName][1:] @@ -695,7 +695,7 @@ func (r *NnfAccessReconciler) mapClientLocalStorage(ctx context.Context, access // Allow storages to be re-used for the shared case storageMapping[client] = append(storageMapping[client], existingStorage[storageName][0]) default: - return nil, dwsv1alpha2.NewResourceError("invalid target type '%s'", access.Spec.Target).WithFatal() + return nil, dwsv1alpha3.NewResourceError("invalid target type '%s'", access.Spec.Target).WithFatal() } } } @@ -711,7 +711,7 @@ type mountReference struct { // addNodeStorageEndpoints adds the compute node information to the NnfNodeStorage resource // so it can make the NVMe namespaces accessible on the compute node. This is done on the rabbit // by creating StorageGroup resources through swordfish for the correct endpoint. -func (r *NnfAccessReconciler) addBlockStorageAccess(ctx context.Context, access *nnfv1alpha3.NnfAccess, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) error { +func (r *NnfAccessReconciler) addBlockStorageAccess(ctx context.Context, access *nnfv1alpha3.NnfAccess, storageMapping map[string][]dwsv1alpha3.ClientMountInfo) error { // NnfNodeStorage clientReferences only need to be added for compute nodes. If // this nnfAccess is not for compute nodes, then there's no work to do. 
if access.Spec.ClientReference == (corev1.ObjectReference{}) { @@ -801,7 +801,7 @@ func (r *NnfAccessReconciler) addBlockStorageAccess(ctx context.Context, access return nil } -func (r *NnfAccessReconciler) getBlockStorageAccessStatus(ctx context.Context, access *nnfv1alpha3.NnfAccess, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) (bool, error) { +func (r *NnfAccessReconciler) getBlockStorageAccessStatus(ctx context.Context, access *nnfv1alpha3.NnfAccess, storageMapping map[string][]dwsv1alpha3.ClientMountInfo) (bool, error) { // NnfNodeStorage clientReferences only need to be checked for compute nodes. If // this nnfAccess is not for compute nodes, then there's no work to do. if access.Spec.ClientReference == (corev1.ObjectReference{}) { @@ -862,7 +862,7 @@ func (r *NnfAccessReconciler) getBlockStorageAccessStatus(ctx context.Context, a for _, nnfNodeBlockStorage := range nnfNodeBlockStorages { if nnfNodeBlockStorage.Status.Error != nil { - return false, dwsv1alpha2.NewResourceError("Node: %s", nnfNodeBlockStorage.GetNamespace()).WithError(nnfNodeBlockStorage.Status.Error) + return false, dwsv1alpha3.NewResourceError("Node: %s", nnfNodeBlockStorage.GetNamespace()).WithError(nnfNodeBlockStorage.Status.Error) } } @@ -890,7 +890,7 @@ func (r *NnfAccessReconciler) getBlockStorageAccessStatus(ctx context.Context, a // removeNodeStorageEndpoints modifies the NnfNodeStorage resources to remove the client endpoints for the // compute nodes that had mounted the storage. This causes NnfNodeStorage to remove the StorageGroups for // those compute nodes and remove access to the NVMe namespaces from the computes. 
-func (r *NnfAccessReconciler) removeBlockStorageAccess(ctx context.Context, access *nnfv1alpha3.NnfAccess, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) error { +func (r *NnfAccessReconciler) removeBlockStorageAccess(ctx context.Context, access *nnfv1alpha3.NnfAccess, storageMapping map[string][]dwsv1alpha3.ClientMountInfo) error { // NnfNodeStorage clientReferences only need to be removed for compute nodes. If // this nnfAccess is not for compute nodes, then there's no work to do. if access.Spec.ClientReference == (corev1.ObjectReference{}) { @@ -952,7 +952,7 @@ func (r *NnfAccessReconciler) removeBlockStorageAccess(ctx context.Context, acce } // manageClientMounts creates or updates the ClientMount resources based on the information in the storageMapping map. -func (r *NnfAccessReconciler) manageClientMounts(ctx context.Context, access *nnfv1alpha3.NnfAccess, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) error { +func (r *NnfAccessReconciler) manageClientMounts(ctx context.Context, access *nnfv1alpha3.NnfAccess, storageMapping map[string][]dwsv1alpha3.ClientMountInfo) error { log := r.Log.WithValues("NnfAccess", client.ObjectKeyFromObject(access)) if !access.Spec.MakeClientMounts { @@ -960,7 +960,7 @@ func (r *NnfAccessReconciler) manageClientMounts(ctx context.Context, access *nn } if access.Spec.StorageReference.Kind != reflect.TypeOf(nnfv1alpha3.NnfStorage{}).Name() { - return dwsv1alpha2.NewResourceError("invalid StorageReference kind %s", access.Spec.StorageReference.Kind).WithFatal() + return dwsv1alpha3.NewResourceError("invalid StorageReference kind %s", access.Spec.StorageReference.Kind).WithFatal() } nnfStorage := &nnfv1alpha3.NnfStorage{ @@ -971,7 +971,7 @@ func (r *NnfAccessReconciler) manageClientMounts(ctx context.Context, access *nn } if err := r.Get(ctx, client.ObjectKeyFromObject(nnfStorage), nnfStorage); err != nil { - return dwsv1alpha2.NewResourceError("could not get NnfStorage %v", 
client.ObjectKeyFromObject(nnfStorage)).WithError(err).WithMajor() + return dwsv1alpha3.NewResourceError("could not get NnfStorage %v", client.ObjectKeyFromObject(nnfStorage)).WithError(err).WithMajor() } // The targetIndex is the directive index of the NnfStorage. This is needed in clientmountd because @@ -988,7 +988,7 @@ func (r *NnfAccessReconciler) manageClientMounts(ctx context.Context, access *nn // Start a goroutine for each ClientMount to create g.Go(func() error { - clientMount := &dwsv1alpha2.ClientMount{ + clientMount := &dwsv1alpha3.ClientMount{ ObjectMeta: metav1.ObjectMeta{ Name: clientMountName(access), Namespace: clientName, @@ -996,13 +996,13 @@ func (r *NnfAccessReconciler) manageClientMounts(ctx context.Context, access *nn } result, err := ctrl.CreateOrUpdate(ctx, r.Client, clientMount, func() error { - dwsv1alpha2.InheritParentLabels(clientMount, access) - dwsv1alpha2.AddOwnerLabels(clientMount, access) + dwsv1alpha3.InheritParentLabels(clientMount, access) + dwsv1alpha3.AddOwnerLabels(clientMount, access) setTargetDirectiveIndexLabel(clientMount, targetIndex) setTargetOwnerUIDLabel(clientMount, string(nnfStorage.GetUID())) clientMount.Spec.Node = clientName - clientMount.Spec.DesiredState = dwsv1alpha2.ClientMountState(access.Spec.DesiredState) + clientMount.Spec.DesiredState = dwsv1alpha3.ClientMountState(access.Spec.DesiredState) clientMount.Spec.Mounts = storageList return nil @@ -1039,15 +1039,15 @@ func (r *NnfAccessReconciler) getClientMountStatus(ctx context.Context, access * return true, nil } - clientMountList := &dwsv1alpha2.ClientMountList{} - matchLabels := dwsv1alpha2.MatchingOwner(access) + clientMountList := &dwsv1alpha3.ClientMountList{} + matchLabels := dwsv1alpha3.MatchingOwner(access) listOptions := []client.ListOption{ matchLabels, } if err := r.List(ctx, clientMountList, listOptions...); err != nil { - return false, dwsv1alpha2.NewResourceError("could not list ClientMounts").WithError(err) + return false, 
dwsv1alpha3.NewResourceError("could not list ClientMounts").WithError(err) } // make a map with empty data of the client names to allow easy searching @@ -1056,7 +1056,7 @@ func (r *NnfAccessReconciler) getClientMountStatus(ctx context.Context, access * clientNameMap[clientName] = struct{}{} } - clientMounts := []dwsv1alpha2.ClientMount{} + clientMounts := []dwsv1alpha3.ClientMount{} for _, clientMount := range clientMountList.Items { if _, exists := clientNameMap[clientMount.GetNamespace()]; exists { clientMounts = append(clientMounts, clientMount) @@ -1066,7 +1066,7 @@ func (r *NnfAccessReconciler) getClientMountStatus(ctx context.Context, access * // Check the clientmounts for any errors first for _, clientMount := range clientMounts { if clientMount.Status.Error != nil { - return false, dwsv1alpha2.NewResourceError("Node: %s", clientMount.GetNamespace()).WithError(clientMount.Status.Error) + return false, dwsv1alpha3.NewResourceError("Node: %s", clientMount.GetNamespace()).WithError(clientMount.Status.Error) } } @@ -1085,7 +1085,7 @@ func (r *NnfAccessReconciler) getClientMountStatus(ctx context.Context, access * } if clientMount.GetCreationTimestamp().Add(time.Duration(time.Duration(childTimeout) * time.Second)).Before(time.Now()) { - return false, dwsv1alpha2.NewResourceError("Node: %s: ClientMount has not been reconciled after %d seconds", clientMount.GetNamespace(), childTimeout).WithMajor() + return false, dwsv1alpha3.NewResourceError("Node: %s: ClientMount has not been reconciled after %d seconds", clientMount.GetNamespace(), childTimeout).WithMajor() } return false, nil @@ -1152,7 +1152,7 @@ func (r *NnfAccessReconciler) ComputesEnqueueRequests(ctx context.Context, o cli requests := []reconcile.Request{} // Ensure the storage resource is updated with the latest NNF Node resource status - computes := &dwsv1alpha2.Computes{ + computes := &dwsv1alpha3.Computes{ ObjectMeta: metav1.ObjectMeta{ Name: o.GetName(), Namespace: o.GetNamespace(), @@ -1168,17 
+1168,17 @@ func (r *NnfAccessReconciler) ComputesEnqueueRequests(ctx context.Context, o cli return []reconcile.Request{} } - ownerName, exists := labels[dwsv1alpha2.OwnerNameLabel] + ownerName, exists := labels[dwsv1alpha3.OwnerNameLabel] if !exists { return []reconcile.Request{} } - ownerNamespace, exists := labels[dwsv1alpha2.OwnerNamespaceLabel] + ownerNamespace, exists := labels[dwsv1alpha3.OwnerNamespaceLabel] if !exists { return []reconcile.Request{} } - ownerKind, exists := labels[dwsv1alpha2.OwnerKindLabel] + ownerKind, exists := labels[dwsv1alpha3.OwnerKindLabel] if !exists { return []reconcile.Request{} } @@ -1186,9 +1186,9 @@ func (r *NnfAccessReconciler) ComputesEnqueueRequests(ctx context.Context, o cli // Find all the NnfAccess resource with the same owner as the Computes resource listOptions := []client.ListOption{ client.MatchingLabels(map[string]string{ - dwsv1alpha2.OwnerKindLabel: ownerKind, - dwsv1alpha2.OwnerNameLabel: ownerName, - dwsv1alpha2.OwnerNamespaceLabel: ownerNamespace, + dwsv1alpha3.OwnerKindLabel: ownerKind, + dwsv1alpha3.OwnerNameLabel: ownerName, + dwsv1alpha3.OwnerNamespaceLabel: ownerNamespace, }), } @@ -1207,8 +1207,8 @@ func (r *NnfAccessReconciler) ComputesEnqueueRequests(ctx context.Context, o cli // SetupWithManager sets up the controller with the Manager. func (r *NnfAccessReconciler) SetupWithManager(mgr ctrl.Manager) error { - r.ChildObjects = []dwsv1alpha2.ObjectList{ - &dwsv1alpha2.ClientMountList{}, + r.ChildObjects = []dwsv1alpha3.ObjectList{ + &dwsv1alpha3.ClientMountList{}, } // NOTE: NNF Access controller also depends on NNF Storage and NNF Node Storage status' @@ -1231,7 +1231,7 @@ func (r *NnfAccessReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). WithOptions(controller.Options{MaxConcurrentReconciles: maxReconciles}). For(&nnfv1alpha3.NnfAccess{}). - Watches(&dwsv1alpha2.Computes{}, handler.EnqueueRequestsFromMapFunc(r.ComputesEnqueueRequests)). 
- Watches(&dwsv1alpha2.ClientMount{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). + Watches(&dwsv1alpha3.Computes{}, handler.EnqueueRequestsFromMapFunc(r.ComputesEnqueueRequests)). + Watches(&dwsv1alpha3.ClientMount{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha3.OwnerLabelMapFunc)). Complete(r) } diff --git a/internal/controller/nnf_access_controller_test.go b/internal/controller/nnf_access_controller_test.go index b78600921..02662a112 100644 --- a/internal/controller/nnf_access_controller_test.go +++ b/internal/controller/nnf_access_controller_test.go @@ -33,7 +33,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" ) @@ -46,7 +46,7 @@ var _ = Describe("Access Controller Test", func() { nnfNodes := [2]*nnfv1alpha3.NnfNode{} nodes := [2]*corev1.Node{} - var systemConfiguration *dwsv1alpha2.SystemConfiguration + var systemConfiguration *dwsv1alpha3.SystemConfiguration var storageProfile *nnfv1alpha3.NnfStorageProfile var setup sync.Once @@ -58,14 +58,14 @@ var _ = Describe("Access Controller Test", func() { } }) - systemConfiguration = &dwsv1alpha2.SystemConfiguration{ + systemConfiguration = &dwsv1alpha3.SystemConfiguration{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ Name: "default", Namespace: corev1.NamespaceDefault, }, - Spec: dwsv1alpha2.SystemConfigurationSpec{ - StorageNodes: []dwsv1alpha2.SystemConfigurationStorageNode{ + Spec: dwsv1alpha3.SystemConfigurationSpec{ + StorageNodes: []dwsv1alpha3.SystemConfigurationStorageNode{ { Type: "Rabbit", Name: "rabbit-nnf-access-test-node-1", @@ -117,7 +117,7 @@ var _ = Describe("Access Controller Test", func() { return k8sClient.Update(context.TODO(), nnfNodes[i]) }).Should(Succeed(), "set LNet Nid in NnfNode") - storage := 
&dwsv1alpha2.Storage{ + storage := &dwsv1alpha3.Storage{ ObjectMeta: metav1.ObjectMeta{ Name: nodeName, Namespace: corev1.NamespaceDefault, @@ -157,7 +157,7 @@ var _ = Describe("Access Controller Test", func() { } Expect(k8sClient.Delete(context.TODO(), systemConfiguration)).To(Succeed()) - tempConfig := &dwsv1alpha2.SystemConfiguration{} + tempConfig := &dwsv1alpha3.SystemConfiguration{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(systemConfiguration), tempConfig) }).ShouldNot(Succeed()) @@ -291,7 +291,7 @@ func verifyClientMount(storage *nnfv1alpha3.NnfStorage, storageProfile *nnfv1alp Spec: nnfv1alpha3.NnfAccessSpec{ DesiredState: "mounted", - TeardownState: dwsv1alpha2.StatePreRun, + TeardownState: dwsv1alpha3.StatePreRun, Target: "all", ClientReference: corev1.ObjectReference{}, MakeClientMounts: true, @@ -318,7 +318,7 @@ func verifyClientMount(storage *nnfv1alpha3.NnfStorage, storageProfile *nnfv1alp By("Verify Client Mounts") for _, nodeName := range nodeNames { - mount := &dwsv1alpha2.ClientMount{ + mount := &dwsv1alpha3.ClientMount{ ObjectMeta: metav1.ObjectMeta{ Name: clientMountName(access), Namespace: nodeName, @@ -338,14 +338,14 @@ func verifyClientMount(storage *nnfv1alpha3.NnfStorage, storageProfile *nnfv1alp Expect(mount.Spec).To(MatchFields(IgnoreExtras, Fields{ "Node": Equal(nodeName), - "DesiredState": Equal(dwsv1alpha2.ClientMountStateMounted), + "DesiredState": Equal(dwsv1alpha3.ClientMountStateMounted), "Mounts": HaveLen(1), })) Expect(mount.Status.Error).To(BeNil()) Expect(mount.Status.Mounts).To(HaveLen(1)) Expect(mount.Status.Mounts[0]).To(MatchAllFields(Fields{ - "State": Equal(dwsv1alpha2.ClientMountStateMounted), + "State": Equal(dwsv1alpha3.ClientMountStateMounted), "Ready": BeTrue(), })) @@ -372,7 +372,7 @@ func verifyClientMount(storage *nnfv1alpha3.NnfStorage, storageProfile *nnfv1alp By("Verify 
Client Mounts go unmounted") for _, nodeName := range nodeNames { - mount := &dwsv1alpha2.ClientMount{ + mount := &dwsv1alpha3.ClientMount{ ObjectMeta: metav1.ObjectMeta{ Name: clientMountName(access), Namespace: nodeName, @@ -392,13 +392,13 @@ func verifyClientMount(storage *nnfv1alpha3.NnfStorage, storageProfile *nnfv1alp Expect(mount.Spec).To(MatchFields(IgnoreExtras, Fields{ "Node": Equal(nodeName), - "DesiredState": Equal(dwsv1alpha2.ClientMountStateUnmounted), + "DesiredState": Equal(dwsv1alpha3.ClientMountStateUnmounted), "Mounts": HaveLen(1), })) Expect(mount.Status.Error).To(BeNil()) Expect(mount.Status.Mounts).To(HaveLen(1)) Expect(mount.Status.Mounts[0]).To(MatchAllFields(Fields{ - "State": Equal(dwsv1alpha2.ClientMountStateUnmounted), + "State": Equal(dwsv1alpha3.ClientMountStateUnmounted), "Ready": BeTrue(), })) } diff --git a/internal/controller/nnf_clientmount_controller.go b/internal/controller/nnf_clientmount_controller.go index 0a65e052d..ae0dc6bbd 100644 --- a/internal/controller/nnf_clientmount_controller.go +++ b/internal/controller/nnf_clientmount_controller.go @@ -42,7 +42,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/predicate" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" "github.com/DataWorkflowServices/dws/utils/updater" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" @@ -115,7 +115,7 @@ func (r *NnfClientMountReconciler) Reconcile(ctx context.Context, req ctrl.Reque metrics.NnfClientMountReconcilesTotal.Inc() - clientMount := &dwsv1alpha2.ClientMount{} + clientMount := &dwsv1alpha3.ClientMount{} if err := r.Get(ctx, req.NamespacedName, clientMount); err != nil { // ignore not-found errors, since they can't be fixed by an immediate // requeue (we'll need to wait for a new notification), and we can get them @@ -124,7 
+124,7 @@ func (r *NnfClientMountReconciler) Reconcile(ctx context.Context, req ctrl.Reque } // Create a status updater that handles the call to status().Update() if any of the fields // in clientMount.Status change - statusUpdater := updater.NewStatusUpdater[*dwsv1alpha2.ClientMountStatus](clientMount) + statusUpdater := updater.NewStatusUpdater[*dwsv1alpha3.ClientMountStatus](clientMount) defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() defer func() { clientMount.Status.SetResourceErrorAndLog(err, log) }() @@ -136,7 +136,7 @@ func (r *NnfClientMountReconciler) Reconcile(ctx context.Context, req ctrl.Reque // Unmount everything before removing the finalizer log.Info("Unmounting all file systems due to resource deletion") - if err := r.changeMountAll(ctx, clientMount, dwsv1alpha2.ClientMountStateUnmounted); err != nil { + if err := r.changeMountAll(ctx, clientMount, dwsv1alpha3.ClientMountStateUnmounted); err != nil { return ctrl.Result{}, err } @@ -154,7 +154,7 @@ func (r *NnfClientMountReconciler) Reconcile(ctx context.Context, req ctrl.Reque // Create the status section if it doesn't exist yet if len(clientMount.Status.Mounts) != len(clientMount.Spec.Mounts) { - clientMount.Status.Mounts = make([]dwsv1alpha2.ClientMountInfoStatus, len(clientMount.Spec.Mounts)) + clientMount.Status.Mounts = make([]dwsv1alpha3.ClientMountInfoStatus, len(clientMount.Spec.Mounts)) } // Initialize the status section if the desired state doesn't match the status state @@ -186,7 +186,7 @@ func (r *NnfClientMountReconciler) Reconcile(ctx context.Context, req ctrl.Reque clientMount.Status.AllReady = false if err := r.changeMountAll(ctx, clientMount, clientMount.Spec.DesiredState); err != nil { - resourceError := dwsv1alpha2.NewResourceError("mount/unmount failed").WithError(err) + resourceError := dwsv1alpha3.NewResourceError("mount/unmount failed").WithError(err) log.Info(resourceError.Error()) return ctrl.Result{}, resourceError @@ -198,18 
+198,18 @@ func (r *NnfClientMountReconciler) Reconcile(ctx context.Context, req ctrl.Reque } // changeMmountAll mounts or unmounts all the file systems listed in the spec.Mounts list -func (r *NnfClientMountReconciler) changeMountAll(ctx context.Context, clientMount *dwsv1alpha2.ClientMount, state dwsv1alpha2.ClientMountState) error { +func (r *NnfClientMountReconciler) changeMountAll(ctx context.Context, clientMount *dwsv1alpha3.ClientMount, state dwsv1alpha3.ClientMountState) error { var firstError error for i := range clientMount.Spec.Mounts { var err error switch state { - case dwsv1alpha2.ClientMountStateMounted: + case dwsv1alpha3.ClientMountStateMounted: err = r.changeMount(ctx, clientMount, i, true) - case dwsv1alpha2.ClientMountStateUnmounted: + case dwsv1alpha3.ClientMountStateUnmounted: err = r.changeMount(ctx, clientMount, i, false) default: - return dwsv1alpha2.NewResourceError("invalid desired state %s", state).WithFatal() + return dwsv1alpha3.NewResourceError("invalid desired state %s", state).WithFatal() } if err != nil { @@ -226,24 +226,24 @@ func (r *NnfClientMountReconciler) changeMountAll(ctx context.Context, clientMou } // changeMount mount or unmounts a single mount point described in the ClientMountInfo object -func (r *NnfClientMountReconciler) changeMount(ctx context.Context, clientMount *dwsv1alpha2.ClientMount, index int, shouldMount bool) error { +func (r *NnfClientMountReconciler) changeMount(ctx context.Context, clientMount *dwsv1alpha3.ClientMount, index int, shouldMount bool) error { log := r.Log.WithValues("ClientMount", client.ObjectKeyFromObject(clientMount), "index", index) clientMountInfo := clientMount.Spec.Mounts[index] nnfNodeStorage, err := r.fakeNnfNodeStorage(ctx, clientMount, index) if err != nil { - return dwsv1alpha2.NewResourceError("unable to build NnfNodeStorage").WithError(err).WithMajor() + return dwsv1alpha3.NewResourceError("unable to build NnfNodeStorage").WithError(err).WithMajor() } blockDevice, fileSystem, 
err := getBlockDeviceAndFileSystem(ctx, r.Client, nnfNodeStorage, clientMountInfo.Device.DeviceReference.Data, log) if err != nil { - return dwsv1alpha2.NewResourceError("unable to get file system information").WithError(err).WithMajor() + return dwsv1alpha3.NewResourceError("unable to get file system information").WithError(err).WithMajor() } if shouldMount { activated, err := blockDevice.Activate(ctx) if err != nil { - return dwsv1alpha2.NewResourceError("unable to activate block device").WithError(err).WithMajor() + return dwsv1alpha3.NewResourceError("unable to activate block device").WithError(err).WithMajor() } if activated { log.Info("Activated block device", "block device path", blockDevice.GetDevice()) @@ -251,7 +251,7 @@ func (r *NnfClientMountReconciler) changeMount(ctx context.Context, clientMount mounted, err := fileSystem.Mount(ctx, clientMountInfo.MountPath, clientMount.Status.Mounts[index].Ready) if err != nil { - return dwsv1alpha2.NewResourceError("unable to mount file system").WithError(err).WithMajor() + return dwsv1alpha3.NewResourceError("unable to mount file system").WithError(err).WithMajor() } if mounted { log.Info("Mounted file system", "Mount path", clientMountInfo.MountPath) @@ -259,7 +259,7 @@ func (r *NnfClientMountReconciler) changeMount(ctx context.Context, clientMount if clientMount.Spec.Mounts[index].SetPermissions { if err := os.Chown(clientMountInfo.MountPath, int(clientMount.Spec.Mounts[index].UserID), int(clientMount.Spec.Mounts[index].GroupID)); err != nil { - return dwsv1alpha2.NewResourceError("unable to set owner and group for file system").WithError(err).WithMajor() + return dwsv1alpha3.NewResourceError("unable to set owner and group for file system").WithError(err).WithMajor() } // If we're setting permissions then we know this is only happening once. 
Dump the @@ -269,7 +269,7 @@ func (r *NnfClientMountReconciler) changeMount(ctx context.Context, clientMount if clientMount.Spec.Mounts[index].Type == "lustre" { serversFilepath := filepath.Join(clientMountInfo.MountPath, lustreServersFilepath) if err := r.dumpServersToFile(ctx, clientMount, serversFilepath, clientMount.Spec.Mounts[index].UserID, clientMount.Spec.Mounts[index].GroupID); err != nil { - return dwsv1alpha2.NewResourceError("unable to dump servers resource to file on clientmount path").WithError(err).WithMajor() + return dwsv1alpha3.NewResourceError("unable to dump servers resource to file on clientmount path").WithError(err).WithMajor() } } } @@ -277,7 +277,7 @@ func (r *NnfClientMountReconciler) changeMount(ctx context.Context, clientMount } else { unmounted, err := fileSystem.Unmount(ctx, clientMountInfo.MountPath) if err != nil { - return dwsv1alpha2.NewResourceError("unable to unmount file system").WithError(err).WithMajor() + return dwsv1alpha3.NewResourceError("unable to unmount file system").WithError(err).WithMajor() } if unmounted { log.Info("Unmounted file system", "Mount path", clientMountInfo.MountPath) @@ -291,7 +291,7 @@ func (r *NnfClientMountReconciler) changeMount(ctx context.Context, clientMount } deactivated, err := blockDevice.Deactivate(ctx, fullDeactivate) if err != nil { - return dwsv1alpha2.NewResourceError("unable to deactivate block device").WithError(err).WithMajor() + return dwsv1alpha3.NewResourceError("unable to deactivate block device").WithError(err).WithMajor() } if deactivated { log.Info("Deactivated block device", "block device path", blockDevice.GetDevice()) @@ -302,30 +302,30 @@ func (r *NnfClientMountReconciler) changeMount(ctx context.Context, clientMount } // Retrieve the Servers resource for the workflow and write it to a dotfile on the mount path for compute users to retrieve -func (r *NnfClientMountReconciler) dumpServersToFile(ctx context.Context, clientMount *dwsv1alpha2.ClientMount, path string, uid, gid 
uint32) error { +func (r *NnfClientMountReconciler) dumpServersToFile(ctx context.Context, clientMount *dwsv1alpha3.ClientMount, path string, uid, gid uint32) error { // Get the NnfServers Resource server, err := r.getServerForClientMount(ctx, clientMount) if err != nil { - return dwsv1alpha2.NewResourceError("could not retrieve corresponding NnfServer resource for this ClientMount").WithError(err).WithMajor() + return dwsv1alpha3.NewResourceError("could not retrieve corresponding NnfServer resource for this ClientMount").WithError(err).WithMajor() } // Dump server resource to file on mountpoint (e.g. .nnf-lustre) file, err := os.Create(path) if err != nil { - return dwsv1alpha2.NewResourceError("could not create servers file").WithError(err).WithMajor() + return dwsv1alpha3.NewResourceError("could not create servers file").WithError(err).WithMajor() } defer file.Close() encoder := json.NewEncoder(file) err = encoder.Encode(createLustreMapping(server)) if err != nil { - return dwsv1alpha2.NewResourceError("could not write JSON to file").WithError(err).WithMajor() + return dwsv1alpha3.NewResourceError("could not write JSON to file").WithError(err).WithMajor() } // Change permissions to user if err := os.Chown(path, int(uid), int(gid)); err != nil { - return dwsv1alpha2.NewResourceError("unable to set owner and group").WithError(err).WithMajor() + return dwsv1alpha3.NewResourceError("unable to set owner and group").WithError(err).WithMajor() } return nil @@ -339,20 +339,20 @@ func (r *NnfClientMountReconciler) dumpServersToFile(ctx context.Context, client // 2. PersistentStorageInstance (persistent storage case) // // Once we understand who owns the NnfStorage resource, we can then obtain the NnfServer resource through slightly different methods. 
-func (r *NnfClientMountReconciler) getServerForClientMount(ctx context.Context, clientMount *dwsv1alpha2.ClientMount) (*dwsv1alpha2.Servers, error) { +func (r *NnfClientMountReconciler) getServerForClientMount(ctx context.Context, clientMount *dwsv1alpha3.ClientMount) (*dwsv1alpha3.Servers, error) { storageKind := "NnfStorage" persistentKind := "PersistentStorageInstance" workflowKind := "Workflow" // Get the owner and directive index from ClientMount's labels - ownerKind, ownerExists := clientMount.Labels[dwsv1alpha2.OwnerKindLabel] - ownerName, ownerNameExists := clientMount.Labels[dwsv1alpha2.OwnerNameLabel] - ownerNS, ownerNSExists := clientMount.Labels[dwsv1alpha2.OwnerNamespaceLabel] + ownerKind, ownerExists := clientMount.Labels[dwsv1alpha3.OwnerKindLabel] + ownerName, ownerNameExists := clientMount.Labels[dwsv1alpha3.OwnerNameLabel] + ownerNS, ownerNSExists := clientMount.Labels[dwsv1alpha3.OwnerNamespaceLabel] _, idxExists := clientMount.Labels[nnfv1alpha3.DirectiveIndexLabel] // We should expect the owner of the ClientMount to be NnfStorage and have the expected labels if !ownerExists || !ownerNameExists || !ownerNSExists || !idxExists || ownerKind != storageKind { - return nil, dwsv1alpha2.NewResourceError("expected ClientMount owner to be of kind NnfStorage and have the expected labels").WithMajor() + return nil, dwsv1alpha3.NewResourceError("expected ClientMount owner to be of kind NnfStorage and have the expected labels").WithMajor() } // Retrieve the NnfStorage resource @@ -363,19 +363,19 @@ func (r *NnfClientMountReconciler) getServerForClientMount(ctx context.Context, }, } if err := r.Get(ctx, client.ObjectKeyFromObject(storage), storage); err != nil { - return nil, dwsv1alpha2.NewResourceError("unable retrieve NnfStorage resource").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("unable retrieve NnfStorage resource").WithError(err).WithMajor() } // Get the owner and directive index from NnfStorage's labels - ownerKind, 
ownerExists = storage.Labels[dwsv1alpha2.OwnerKindLabel] - ownerName, ownerNameExists = storage.Labels[dwsv1alpha2.OwnerNameLabel] - ownerNS, ownerNSExists = storage.Labels[dwsv1alpha2.OwnerNamespaceLabel] + ownerKind, ownerExists = storage.Labels[dwsv1alpha3.OwnerKindLabel] + ownerName, ownerNameExists = storage.Labels[dwsv1alpha3.OwnerNameLabel] + ownerNS, ownerNSExists = storage.Labels[dwsv1alpha3.OwnerNamespaceLabel] idx, idxExists := storage.Labels[nnfv1alpha3.DirectiveIndexLabel] // We should expect the owner of the NnfStorage to be Workflow or PersistentStorageInstance and // have the expected labels if !ownerExists || !ownerNameExists || !ownerNSExists || !idxExists || (ownerKind != workflowKind && ownerKind != persistentKind) { - return nil, dwsv1alpha2.NewResourceError("expected NnfStorage owner to be of kind Workflow or PersistentStorageInstance and have the expected labels").WithMajor() + return nil, dwsv1alpha3.NewResourceError("expected NnfStorage owner to be of kind Workflow or PersistentStorageInstance and have the expected labels").WithMajor() } // If the owner is a workflow, then we can use the workflow labels and directive index to get @@ -384,8 +384,8 @@ func (r *NnfClientMountReconciler) getServerForClientMount(ctx context.Context, if ownerKind == workflowKind { listOptions = []client.ListOption{ client.MatchingLabels(map[string]string{ - dwsv1alpha2.WorkflowNameLabel: ownerName, - dwsv1alpha2.WorkflowNamespaceLabel: ownerNS, + dwsv1alpha3.WorkflowNameLabel: ownerName, + dwsv1alpha3.WorkflowNamespaceLabel: ownerNS, nnfv1alpha3.DirectiveIndexLabel: idx, }), } @@ -394,21 +394,21 @@ func (r *NnfClientMountReconciler) getServerForClientMount(ctx context.Context, // labels. It also will not have a directive index. 
listOptions = []client.ListOption{ client.MatchingLabels(map[string]string{ - dwsv1alpha2.OwnerKindLabel: ownerKind, - dwsv1alpha2.OwnerNameLabel: ownerName, - dwsv1alpha2.OwnerNamespaceLabel: ownerNS, + dwsv1alpha3.OwnerKindLabel: ownerKind, + dwsv1alpha3.OwnerNameLabel: ownerName, + dwsv1alpha3.OwnerNamespaceLabel: ownerNS, }), } } - serversList := &dwsv1alpha2.ServersList{} + serversList := &dwsv1alpha3.ServersList{} if err := r.List(ctx, serversList, listOptions...); err != nil { - return nil, dwsv1alpha2.NewResourceError("unable retrieve NnfServers resource").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("unable retrieve NnfServers resource").WithError(err).WithMajor() } // We should only have 1 if len(serversList.Items) != 1 { - return nil, dwsv1alpha2.NewResourceError(fmt.Sprintf("wrong number of NnfServers resources: expected 1, got %d", len(serversList.Items))).WithMajor() + return nil, dwsv1alpha3.NewResourceError(fmt.Sprintf("wrong number of NnfServers resources: expected 1, got %d", len(serversList.Items))).WithMajor() } return &serversList.Items[0], nil @@ -428,7 +428,7 @@ Flatten the AllocationSets to create mapping for lustre information. Example: ] } */ -func createLustreMapping(server *dwsv1alpha2.Servers) map[string][]string { +func createLustreMapping(server *dwsv1alpha3.Servers) map[string][]string { m := map[string][]string{} @@ -449,7 +449,7 @@ func createLustreMapping(server *dwsv1alpha2.Servers) map[string][]string { // fakeNnfNodeStorage creates an NnfNodeStorage resource filled in with only the fields // that are necessary to mount the file system. This is done to reduce the API server load // because the compute nodes don't need to Get() the actual NnfNodeStorage. 
-func (r *NnfClientMountReconciler) fakeNnfNodeStorage(ctx context.Context, clientMount *dwsv1alpha2.ClientMount, index int) (*nnfv1alpha3.NnfNodeStorage, error) { +func (r *NnfClientMountReconciler) fakeNnfNodeStorage(ctx context.Context, clientMount *dwsv1alpha3.ClientMount, index int) (*nnfv1alpha3.NnfNodeStorage, error) { nnfNodeStorage := &nnfv1alpha3.NnfNodeStorage{ ObjectMeta: metav1.ObjectMeta{ Name: clientMount.Spec.Mounts[index].Device.DeviceReference.ObjectReference.Name, @@ -460,10 +460,10 @@ func (r *NnfClientMountReconciler) fakeNnfNodeStorage(ctx context.Context, clien // These labels aren't exactly right (NnfStorage owns NnfNodeStorage), but the // labels that are important for doing the mount are there and correct - dwsv1alpha2.InheritParentLabels(nnfNodeStorage, clientMount) + dwsv1alpha3.InheritParentLabels(nnfNodeStorage, clientMount) labels := nnfNodeStorage.GetLabels() labels[nnfv1alpha3.DirectiveIndexLabel] = getTargetDirectiveIndexLabel(clientMount) - labels[dwsv1alpha2.OwnerUidLabel] = getTargetOwnerUIDLabel(clientMount) + labels[dwsv1alpha3.OwnerUidLabel] = getTargetOwnerUIDLabel(clientMount) nnfNodeStorage.SetLabels(labels) nnfNodeStorage.Spec.BlockReference = corev1.ObjectReference{ @@ -489,7 +489,7 @@ func (r *NnfClientMountReconciler) fakeNnfNodeStorage(ctx context.Context, clien nnfStorageProfile, err := getPinnedStorageProfileFromLabel(ctx, r.Client, nnfNodeStorage) if err != nil { - return nil, dwsv1alpha2.NewResourceError("unable to find pinned storage profile").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("unable to find pinned storage profile").WithError(err).WithMajor() } switch nnfNodeStorage.Spec.FileSystemType { @@ -518,7 +518,7 @@ func (r *NnfClientMountReconciler) SetupWithManager(mgr ctrl.Manager) error { maxReconciles := runtime.GOMAXPROCS(0) builder := ctrl.NewControllerManagedBy(mgr). WithOptions(controller.Options{MaxConcurrentReconciles: maxReconciles}). 
- For(&dwsv1alpha2.ClientMount{}) + For(&dwsv1alpha3.ClientMount{}) if _, found := os.LookupEnv("NNF_TEST_ENVIRONMENT"); found { builder = builder.WithEventFilter(filterByRabbitNamespacePrefixForTest()) diff --git a/internal/controller/nnf_clientmount_controller_test.go b/internal/controller/nnf_clientmount_controller_test.go index 4eb2066b4..c9fda56fe 100644 --- a/internal/controller/nnf_clientmount_controller_test.go +++ b/internal/controller/nnf_clientmount_controller_test.go @@ -23,31 +23,31 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" ) var _ = Describe("Clientmount Controller Test", func() { It("It should correctly create a human-readable lustre mapping for Servers ", func() { - s := dwsv1alpha2.Servers{ - Status: dwsv1alpha2.ServersStatus{ - AllocationSets: []dwsv1alpha2.ServersStatusAllocationSet{ - {Label: "ost", Storage: map[string]dwsv1alpha2.ServersStatusStorage{ - "rabbit-node-1": dwsv1alpha2.ServersStatusStorage{ + s := dwsv1alpha3.Servers{ + Status: dwsv1alpha3.ServersStatus{ + AllocationSets: []dwsv1alpha3.ServersStatusAllocationSet{ + {Label: "ost", Storage: map[string]dwsv1alpha3.ServersStatusStorage{ + "rabbit-node-1": dwsv1alpha3.ServersStatusStorage{ AllocationSize: 123345, }, - "rabbit-node-2": dwsv1alpha2.ServersStatusStorage{ + "rabbit-node-2": dwsv1alpha3.ServersStatusStorage{ AllocationSize: 123345, }, }}, - {Label: "mdt", Storage: map[string]dwsv1alpha2.ServersStatusStorage{ - "rabbit-node-3": dwsv1alpha2.ServersStatusStorage{ + {Label: "mdt", Storage: map[string]dwsv1alpha3.ServersStatusStorage{ + "rabbit-node-3": dwsv1alpha3.ServersStatusStorage{ AllocationSize: 123345, }, - "rabbit-node-4": dwsv1alpha2.ServersStatusStorage{ + "rabbit-node-4": dwsv1alpha3.ServersStatusStorage{ AllocationSize: 123345, }, - "rabbit-node-8": dwsv1alpha2.ServersStatusStorage{ + "rabbit-node-8": 
dwsv1alpha3.ServersStatusStorage{ AllocationSize: 123345, }, }}, diff --git a/internal/controller/nnf_lustre_mgt_controller.go b/internal/controller/nnf_lustre_mgt_controller.go index df71976d6..5ad1d05d2 100644 --- a/internal/controller/nnf_lustre_mgt_controller.go +++ b/internal/controller/nnf_lustre_mgt_controller.go @@ -37,7 +37,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/predicate" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" "github.com/DataWorkflowServices/dws/utils/updater" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" @@ -65,7 +65,7 @@ type NnfLustreMGTReconciler struct { client.Client Log logr.Logger Scheme *kruntime.Scheme - ChildObjects []dwsv1alpha2.ObjectList + ChildObjects []dwsv1alpha3.ObjectList ControllerType ControllerType } @@ -138,7 +138,7 @@ func (r *NnfLustreMGTReconciler) Reconcile(ctx context.Context, req ctrl.Request // the FsNameStart field in the spec. 
if nnfLustreMgt.Spec.FsNameStartReference != (corev1.ObjectReference{}) { if nnfLustreMgt.Spec.FsNameStartReference.Kind != reflect.TypeOf(corev1.ConfigMap{}).Name() { - return ctrl.Result{}, dwsv1alpha2.NewResourceError("lustre MGT start reference does not have kind '%s'", reflect.TypeOf(corev1.ConfigMap{}).Name()).WithFatal().WithUser() + return ctrl.Result{}, dwsv1alpha3.NewResourceError("lustre MGT start reference does not have kind '%s'", reflect.TypeOf(corev1.ConfigMap{}).Name()).WithFatal().WithUser() } configMap := &corev1.ConfigMap{ @@ -149,13 +149,13 @@ func (r *NnfLustreMGTReconciler) Reconcile(ctx context.Context, req ctrl.Request } if err := r.Get(ctx, client.ObjectKeyFromObject(configMap), configMap); err != nil { - return ctrl.Result{}, dwsv1alpha2.NewResourceError("could not get Lustre MGT start fsname config map: %v", client.ObjectKeyFromObject(configMap)).WithError(err).WithMajor() + return ctrl.Result{}, dwsv1alpha3.NewResourceError("could not get Lustre MGT start fsname config map: %v", client.ObjectKeyFromObject(configMap)).WithError(err).WithMajor() } if configMap.Data != nil { if _, exists := configMap.Data["NextFsName"]; exists { if len(configMap.Data["NextFsName"]) != 8 { - return ctrl.Result{}, dwsv1alpha2.NewResourceError("starting fsname from config map: %v was not 8 characters", client.ObjectKeyFromObject(configMap)).WithError(err).WithFatal() + return ctrl.Result{}, dwsv1alpha3.NewResourceError("starting fsname from config map: %v was not 8 characters", client.ObjectKeyFromObject(configMap)).WithError(err).WithFatal() } fsnameNext = configMap.Data["NextFsName"] @@ -250,7 +250,7 @@ func (r *NnfLustreMGTReconciler) SetFsNameNext(ctx context.Context, nnfLustreMgt // of the next fsname if nnfLustreMgt.Spec.FsNameStartReference != (corev1.ObjectReference{}) { if nnfLustreMgt.Spec.FsNameStartReference.Kind != reflect.TypeOf(corev1.ConfigMap{}).Name() { - return nil, dwsv1alpha2.NewResourceError("lustre MGT start reference does not have kind 
'%s'", reflect.TypeOf(corev1.ConfigMap{}).Name()).WithFatal().WithUser() + return nil, dwsv1alpha3.NewResourceError("lustre MGT start reference does not have kind '%s'", reflect.TypeOf(corev1.ConfigMap{}).Name()).WithFatal().WithUser() } // Get used fsname Config map @@ -262,7 +262,7 @@ func (r *NnfLustreMGTReconciler) SetFsNameNext(ctx context.Context, nnfLustreMgt } if err := r.Get(ctx, client.ObjectKeyFromObject(configMap), configMap); err != nil { - return nil, dwsv1alpha2.NewResourceError("could not get Lustre MGT start fsname config map: %v", client.ObjectKeyFromObject(configMap)).WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not get Lustre MGT start fsname config map: %v", client.ObjectKeyFromObject(configMap)).WithError(err).WithMajor() } if configMap.Data == nil { @@ -274,7 +274,7 @@ func (r *NnfLustreMGTReconciler) SetFsNameNext(ctx context.Context, nnfLustreMgt if apierrors.IsConflict(err) { return &ctrl.Result{Requeue: true}, nil } - return nil, dwsv1alpha2.NewResourceError("could not update Lustre MGT used fsname config map: %v", client.ObjectKeyFromObject(configMap)).WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not update Lustre MGT used fsname config map: %v", client.ObjectKeyFromObject(configMap)).WithError(err).WithMajor() } } @@ -356,7 +356,7 @@ func (r *NnfLustreMGTReconciler) EraseOldFsName(nnfLustreMgt *nnfv1alpha3.NnfLus return nil } - return dwsv1alpha2.NewResourceError("unable to remove fsname '%s' from MGT", fsname).WithError(err).WithMajor() + return dwsv1alpha3.NewResourceError("unable to remove fsname '%s' from MGT", fsname).WithError(err).WithMajor() } } } diff --git a/internal/controller/nnf_node_block_storage_controller.go b/internal/controller/nnf_node_block_storage_controller.go index 8fa3eb3c9..3cf639676 100644 --- a/internal/controller/nnf_node_block_storage_controller.go +++ b/internal/controller/nnf_node_block_storage_controller.go @@ -53,7 +53,7 @@ import ( openapi 
"github.com/NearNodeFlash/nnf-ec/pkg/rfsf/pkg/common" sf "github.com/NearNodeFlash/nnf-ec/pkg/rfsf/pkg/models" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" "github.com/DataWorkflowServices/dws/utils/updater" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" @@ -239,7 +239,7 @@ func (r *NnfNodeBlockStorageReconciler) Reconcile(ctx context.Context, req ctrl. // Allocate physical storage result, err := r.allocateStorage(nodeBlockStorage, i) if err != nil { - return ctrl.Result{}, dwsv1alpha2.NewResourceError("unable to allocate NVMe namespaces for allocation %v", i).WithError(err).WithMajor() + return ctrl.Result{}, dwsv1alpha3.NewResourceError("unable to allocate NVMe namespaces for allocation %v", i).WithError(err).WithMajor() } if result != nil { return *result, nil @@ -248,7 +248,7 @@ func (r *NnfNodeBlockStorageReconciler) Reconcile(ctx context.Context, req ctrl. // Create a block device in /dev that is accessible on the Rabbit node result, err = r.createBlockDevice(ctx, nodeBlockStorage, i) if err != nil { - return ctrl.Result{}, dwsv1alpha2.NewResourceError("unable to attache NVMe namespace to node for allocation %v", i).WithError(err).WithMajor() + return ctrl.Result{}, dwsv1alpha3.NewResourceError("unable to attache NVMe namespace to node for allocation %v", i).WithError(err).WithMajor() } if result != nil { return *result, nil @@ -264,7 +264,7 @@ func (r *NnfNodeBlockStorageReconciler) Reconcile(ctx context.Context, req ctrl. 
} if err := r.Get(ctx, client.ObjectKeyFromObject(pod), pod); err != nil { - return ctrl.Result{}, dwsv1alpha2.NewResourceError("could not get pod: %v", client.ObjectKeyFromObject(pod)).WithError(err) + return ctrl.Result{}, dwsv1alpha3.NewResourceError("could not get pod: %v", client.ObjectKeyFromObject(pod)).WithError(err) } // Set the start time of the pod that did the reconcile. This allows us to detect when the Rabbit node has @@ -275,7 +275,7 @@ func (r *NnfNodeBlockStorageReconciler) Reconcile(ctx context.Context, req ctrl. } if container.State.Running == nil { - return ctrl.Result{}, dwsv1alpha2.NewResourceError("pod not in state running: %v", client.ObjectKeyFromObject(pod)).WithError(err).WithMajor() + return ctrl.Result{}, dwsv1alpha3.NewResourceError("pod not in state running: %v", client.ObjectKeyFromObject(pod)).WithError(err).WithMajor() } nodeBlockStorage.Status.PodStartTime = container.State.Running.StartedAt @@ -298,7 +298,7 @@ func (r *NnfNodeBlockStorageReconciler) allocateStorage(nodeBlockStorage *nnfv1a storagePoolID := getStoragePoolID(nodeBlockStorage, index) sp, err := r.createStoragePool(ss, storagePoolID, nodeBlockStorage.Spec.Allocations[index].Capacity) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not create storage pool").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not create storage pool").WithError(err).WithMajor() } vc := &sf.VolumeCollectionVolumeCollection{} @@ -311,7 +311,7 @@ func (r *NnfNodeBlockStorageReconciler) allocateStorage(nodeBlockStorage *nnfv1a } if len(allocationStatus.Devices) != len(vc.Members) { - return nil, dwsv1alpha2.NewResourceError("unexpected number of namespaces").WithFatal() + return nil, dwsv1alpha3.NewResourceError("unexpected number of namespaces").WithFatal() } for i, member := range vc.Members { @@ -359,7 +359,7 @@ func (r *NnfNodeBlockStorageReconciler) createBlockDevice(ctx context.Context, n // Retrieve the collection of endpoints for us to map 
serverEndpointCollection := &sf.EndpointCollectionEndpointCollection{} if err := ss.StorageServiceIdEndpointsGet(ss.Id(), serverEndpointCollection); err != nil { - return nil, dwsv1alpha2.NewResourceError("could not get service endpoint").WithError(err).WithFatal() + return nil, dwsv1alpha3.NewResourceError("could not get service endpoint").WithError(err).WithFatal() } // Get the Storage resource to map between compute node name and @@ -369,10 +369,10 @@ func (r *NnfNodeBlockStorageReconciler) createBlockDevice(ctx context.Context, n Namespace: "default", } - storage := &dwsv1alpha2.Storage{} + storage := &dwsv1alpha3.Storage{} err := r.Get(ctx, namespacedName, storage) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not read storage resource").WithError(err) + return nil, dwsv1alpha3.NewResourceError("could not read storage resource").WithError(err) } // Build a list of all nodes with access to the storage @@ -410,7 +410,7 @@ func (r *NnfNodeBlockStorageReconciler) createBlockDevice(ctx context.Context, n } if err := r.deleteStorageGroup(ss, storageGroupId); err != nil { - return nil, dwsv1alpha2.NewResourceError("could not delete storage group").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not delete storage group").WithError(err).WithMajor() } for oldNodeName, accessStatus := range allocationStatus.Accesses { @@ -429,7 +429,7 @@ func (r *NnfNodeBlockStorageReconciler) createBlockDevice(ctx context.Context, n endPoint, err := r.getEndpoint(ss, endpointID) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not get endpoint").WithError(err).WithFatal() + return nil, dwsv1alpha3.NewResourceError("could not get endpoint").WithError(err).WithFatal() } // Skip the endpoints that are not ready @@ -439,7 +439,7 @@ func (r *NnfNodeBlockStorageReconciler) createBlockDevice(ctx context.Context, n sg, err := r.createStorageGroup(ss, storageGroupId, allocationStatus.StoragePoolId, endpointID) if err != nil { - 
return nil, dwsv1alpha2.NewResourceError("could not create storage group").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not create storage group").WithError(err).WithMajor() } if allocationStatus.Accesses == nil { @@ -491,10 +491,10 @@ func (r *NnfNodeBlockStorageReconciler) createBlockDevice(ctx context.Context, n if path == "" { err := nvme.NvmeRescanDevices(log) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not rescan devices after failing to find device path for %v", allocatedDevice).WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not rescan devices after failing to find device path for %v", allocatedDevice).WithError(err).WithMajor() } - return nil, dwsv1alpha2.NewResourceError("could not find device path for %v", allocatedDevice).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not find device path for %v", allocatedDevice).WithMajor() } allocationStatus.Accesses[nodeName].DevicePaths[i] = path @@ -521,7 +521,7 @@ func (r *NnfNodeBlockStorageReconciler) deleteStorage(nodeBlockStorage *nnfv1alp // If the error is from a 404 error, then there's nothing to clean up and we // assume everything has been deleted if !ok || ecErr.StatusCode() != http.StatusNotFound { - nodeBlockStorage.Status.Error = dwsv1alpha2.NewResourceError("could not delete storage pool").WithError(err).WithFatal() + nodeBlockStorage.Status.Error = dwsv1alpha3.NewResourceError("could not delete storage pool").WithError(err).WithFatal() log.Info(nodeBlockStorage.Status.Error.Error()) return &ctrl.Result{Requeue: true}, nil @@ -546,7 +546,7 @@ func (r *NnfNodeBlockStorageReconciler) createStoragePool(ss nnf.StorageServiceA } if err := ss.StorageServiceIdStoragePoolIdPut(ss.Id(), id, sp); err != nil { - resourceErr := dwsv1alpha2.NewResourceError("could not allocate storage pool").WithError(err) + resourceErr := dwsv1alpha3.NewResourceError("could not allocate storage pool").WithError(err) ecErr, ok := 
err.(*ec.ControllerError) if ok { switch ecErr.Cause() { diff --git a/internal/controller/nnf_node_controller.go b/internal/controller/nnf_node_controller.go index a673e13cf..43ce2f5bc 100644 --- a/internal/controller/nnf_node_controller.go +++ b/internal/controller/nnf_node_controller.go @@ -51,7 +51,7 @@ import ( sf "github.com/NearNodeFlash/nnf-ec/pkg/rfsf/pkg/models" "github.com/NearNodeFlash/nnf-sos/pkg/command" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" "github.com/DataWorkflowServices/dws/utils/updater" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" @@ -282,7 +282,7 @@ func (r *NnfNodeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re return ctrl.Result{}, err } - systemConfig := &dwsv1alpha2.SystemConfiguration{} + systemConfig := &dwsv1alpha3.SystemConfiguration{} if err := r.Get(ctx, types.NamespacedName{Name: "default", Namespace: corev1.NamespaceDefault}, systemConfig); err != nil { log.Info("Could not get system configuration") return ctrl.Result{}, nil @@ -501,7 +501,7 @@ func (r *NnfNodeReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&nnfv1alpha3.NnfNode{}). Owns(&corev1.Namespace{}). // The node will create a namespace for itself, so it can watch changes to the NNF Node custom resource - Watches(&dwsv1alpha2.SystemConfiguration{}, handler.EnqueueRequestsFromMapFunc(systemConfigurationMapFunc)). + Watches(&dwsv1alpha3.SystemConfiguration{}, handler.EnqueueRequestsFromMapFunc(systemConfigurationMapFunc)). WatchesRawSource(&source.Channel{Source: r.Events}, &handler.EnqueueRequestForObject{}). 
Complete(r) } diff --git a/internal/controller/nnf_node_storage_controller.go b/internal/controller/nnf_node_storage_controller.go index 94dc8dc2b..aa5ce2d78 100644 --- a/internal/controller/nnf_node_storage_controller.go +++ b/internal/controller/nnf_node_storage_controller.go @@ -37,7 +37,7 @@ import ( "github.com/NearNodeFlash/nnf-sos/pkg/blockdevice" "github.com/NearNodeFlash/nnf-sos/pkg/filesystem" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" "github.com/DataWorkflowServices/dws/utils/updater" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" @@ -62,7 +62,7 @@ type NnfNodeStorageReconciler struct { SemaphoreForDone chan struct{} types.NamespacedName - ChildObjects []dwsv1alpha2.ObjectList + ChildObjects []dwsv1alpha3.ObjectList sync.Mutex started bool @@ -209,7 +209,7 @@ func (r *NnfNodeStorageReconciler) Reconcile(ctx context.Context, req ctrl.Reque result, err := r.createAllocations(ctx, nnfNodeStorage, blockDevices, fileSystems) if err != nil { - return ctrl.Result{}, dwsv1alpha2.NewResourceError("unable to create storage allocation").WithError(err).WithMajor() + return ctrl.Result{}, dwsv1alpha3.NewResourceError("unable to create storage allocation").WithError(err).WithMajor() } if result != nil { return *result, nil @@ -239,13 +239,13 @@ func (r *NnfNodeStorageReconciler) deleteAllocation(ctx context.Context, nnfNode // If we never successfully completed creating the allocation or if it's already gone, then don't try to run PreDeactivate blockDeviceExists, err := blockDevice.CheckExists(ctx) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not check if block device exists").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not check if block device exists").WithError(err).WithMajor() } if blockDeviceExists && nnfNodeStorage.Status.Allocations[index].Ready { 
ran, err := blockDevice.Activate(ctx) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not activate block devices").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not activate block devices").WithError(err).WithMajor() } if ran { log.Info("Activated block device", "allocation", index) @@ -253,7 +253,7 @@ func (r *NnfNodeStorageReconciler) deleteAllocation(ctx context.Context, nnfNode ran, err = fileSystem.Activate(ctx, false) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not activate file system").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not activate file system").WithError(err).WithMajor() } if ran { log.Info("Activated file system", "allocation", index) @@ -261,7 +261,7 @@ func (r *NnfNodeStorageReconciler) deleteAllocation(ctx context.Context, nnfNode ran, err = fileSystem.PreDeactivate(ctx) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not run pre deactivate for file system").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not run pre deactivate for file system").WithError(err).WithMajor() } if ran { log.Info("Pre deactivate file system", "allocation", index) @@ -270,7 +270,7 @@ func (r *NnfNodeStorageReconciler) deleteAllocation(ctx context.Context, nnfNode ran, err := fileSystem.Deactivate(ctx) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not deactivate file system").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not deactivate file system").WithError(err).WithMajor() } if ran { log.Info("Deactivated file system", "allocation", index) @@ -278,7 +278,7 @@ func (r *NnfNodeStorageReconciler) deleteAllocation(ctx context.Context, nnfNode ran, err = fileSystem.Destroy(ctx) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not destroy file system").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not destroy file 
system").WithError(err).WithMajor() } if ran { log.Info("Destroyed file system", "allocation", index) @@ -286,7 +286,7 @@ func (r *NnfNodeStorageReconciler) deleteAllocation(ctx context.Context, nnfNode ran, err = blockDevice.Deactivate(ctx, false) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not deactivate block devices").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not deactivate block devices").WithError(err).WithMajor() } if ran { log.Info("Deactivated block device", "allocation", index) @@ -294,7 +294,7 @@ func (r *NnfNodeStorageReconciler) deleteAllocation(ctx context.Context, nnfNode ran, err = blockDevice.Destroy(ctx) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not destroy block devices").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not destroy block devices").WithError(err).WithMajor() } if ran { log.Info("Destroyed block device", "allocation", index) @@ -316,7 +316,7 @@ func (r *NnfNodeStorageReconciler) createAllocations(ctx context.Context, nnfNod ran, err := blockDevice.Create(ctx, allocationStatus.Ready) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not create block devices").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not create block devices").WithError(err).WithMajor() } if ran { log.Info("Created block device", "allocation", index) @@ -324,7 +324,7 @@ func (r *NnfNodeStorageReconciler) createAllocations(ctx context.Context, nnfNod _, err = blockDevice.Activate(ctx) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not activate block devices").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not activate block devices").WithError(err).WithMajor() } deferIndex := index @@ -341,7 +341,7 @@ func (r *NnfNodeStorageReconciler) createAllocations(ctx context.Context, nnfNod ran, err := fileSystem.Create(ctx, allocationStatus.Ready) if err != nil { - 
return nil, dwsv1alpha2.NewResourceError("could not create file system").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not create file system").WithError(err).WithMajor() } if ran { log.Info("Created file system", "allocation", index) @@ -349,7 +349,7 @@ func (r *NnfNodeStorageReconciler) createAllocations(ctx context.Context, nnfNod ran, err = fileSystem.Activate(ctx, allocationStatus.Ready) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not activate file system").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not activate file system").WithError(err).WithMajor() } if ran { log.Info("Activated file system", "allocation", index) @@ -357,7 +357,7 @@ func (r *NnfNodeStorageReconciler) createAllocations(ctx context.Context, nnfNod ran, err = fileSystem.PostActivate(ctx, allocationStatus.Ready) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not run post activate").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not run post activate").WithError(err).WithMajor() } if ran { log.Info("Post activate file system", "allocation", index) diff --git a/internal/controller/nnf_persistentstorageinstance_controller.go b/internal/controller/nnf_persistentstorageinstance_controller.go index 421da5ced..2a4323388 100644 --- a/internal/controller/nnf_persistentstorageinstance_controller.go +++ b/internal/controller/nnf_persistentstorageinstance_controller.go @@ -35,7 +35,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" "github.com/DataWorkflowServices/dws/utils/dwdparse" "github.com/DataWorkflowServices/dws/utils/updater" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" @@ -52,7 +52,7 @@ type PersistentStorageReconciler struct { client.Client Log 
logr.Logger Scheme *kruntime.Scheme - ChildObjects []dwsv1alpha2.ObjectList + ChildObjects []dwsv1alpha3.ObjectList } //+kubebuilder:rbac:groups=dataworkflowservices.github.io,resources=persistentstorageinstances,verbs=get;list;watch;create;update;patch;delete @@ -70,7 +70,7 @@ func (r *PersistentStorageReconciler) Reconcile(ctx context.Context, req ctrl.Re metrics.NnfPersistentStorageReconcilesTotal.Inc() - persistentStorage := &dwsv1alpha2.PersistentStorageInstance{} + persistentStorage := &dwsv1alpha3.PersistentStorageInstance{} if err := r.Get(ctx, req.NamespacedName, persistentStorage); err != nil { // ignore not-found errors, since they can't be fixed by an immediate // requeue (we'll need to wait for a new notification), and we can get them @@ -78,7 +78,7 @@ func (r *PersistentStorageReconciler) Reconcile(ctx context.Context, req ctrl.Re return ctrl.Result{}, client.IgnoreNotFound(err) } - statusUpdater := updater.NewStatusUpdater[*dwsv1alpha2.PersistentStorageInstanceStatus](persistentStorage) + statusUpdater := updater.NewStatusUpdater[*dwsv1alpha3.PersistentStorageInstanceStatus](persistentStorage) defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() defer func() { persistentStorage.Status.SetResourceError(err) }() @@ -94,7 +94,7 @@ func (r *PersistentStorageReconciler) Reconcile(ctx context.Context, req ctrl.Re } // Delete all NnfStorage and Servers children that are owned by this PersistentStorage. 
- deleteStatus, err := dwsv1alpha2.DeleteChildren(ctx, r.Client, r.ChildObjects, persistentStorage) + deleteStatus, err := dwsv1alpha3.DeleteChildren(ctx, r.Client, r.ChildObjects, persistentStorage) if err != nil { return ctrl.Result{}, err } @@ -130,7 +130,7 @@ func (r *PersistentStorageReconciler) Reconcile(ctx context.Context, req ctrl.Re } if persistentStorage.Status.State == "" { - persistentStorage.Status.State = dwsv1alpha2.PSIStateCreating + persistentStorage.Status.State = dwsv1alpha3.PSIStateCreating } argsMap, err := dwdparse.BuildArgsMap(persistentStorage.Spec.DWDirective) @@ -150,7 +150,7 @@ func (r *PersistentStorageReconciler) Reconcile(ctx context.Context, req ctrl.Re // If this PersistentStorageInstance is for a standalone MGT, add a label so it can be easily found if argsMap["type"] == "lustre" && len(pinnedProfile.Data.LustreStorage.StandaloneMGTPoolName) > 0 { if _, exists := argsMap["capacity"]; exists { - return ctrl.Result{}, dwsv1alpha2.NewResourceError("").WithUserMessage("creating persistent MGT does not accept 'capacity' argument").WithFatal().WithUser() + return ctrl.Result{}, dwsv1alpha3.NewResourceError("").WithUserMessage("creating persistent MGT does not accept 'capacity' argument").WithFatal().WithUser() } labels := persistentStorage.GetLabels() if _, ok := labels[nnfv1alpha3.StandaloneMGTLabel]; !ok { @@ -166,7 +166,7 @@ func (r *PersistentStorageReconciler) Reconcile(ctx context.Context, req ctrl.Re } } else { if _, exists := argsMap["capacity"]; !exists { - return ctrl.Result{}, dwsv1alpha2.NewResourceError("").WithUserMessage("creating persistent storage requires 'capacity' argument").WithFatal().WithUser() + return ctrl.Result{}, dwsv1alpha3.NewResourceError("").WithUserMessage("creating persistent storage requires 'capacity' argument").WithFatal().WithUser() } } @@ -181,16 +181,16 @@ func (r *PersistentStorageReconciler) Reconcile(ctx context.Context, req ctrl.Re } persistentStorage.Status.Servers = v1.ObjectReference{ - 
Kind: reflect.TypeOf(dwsv1alpha2.Servers{}).Name(), + Kind: reflect.TypeOf(dwsv1alpha3.Servers{}).Name(), Name: servers.Name, Namespace: servers.Namespace, } - if persistentStorage.Spec.State == dwsv1alpha2.PSIStateDestroying { + if persistentStorage.Spec.State == dwsv1alpha3.PSIStateDestroying { if len(persistentStorage.Spec.ConsumerReferences) == 0 { - persistentStorage.Status.State = dwsv1alpha2.PSIStateDestroying + persistentStorage.Status.State = dwsv1alpha3.PSIStateDestroying } - } else if persistentStorage.Spec.State == dwsv1alpha2.PSIStateActive { + } else if persistentStorage.Spec.State == dwsv1alpha3.PSIStateActive { // Wait for the NnfStorage to be ready before marking the persistent storage // state as "active" nnfStorage := &nnfv1alpha3.NnfStorage{} @@ -212,16 +212,16 @@ func (r *PersistentStorageReconciler) Reconcile(ctx context.Context, req ctrl.Re } if complete == true { - persistentStorage.Status.State = dwsv1alpha2.PSIStateActive + persistentStorage.Status.State = dwsv1alpha3.PSIStateActive } } return ctrl.Result{}, err } -func (r *PersistentStorageReconciler) createServers(ctx context.Context, persistentStorage *dwsv1alpha2.PersistentStorageInstance) (*dwsv1alpha2.Servers, error) { +func (r *PersistentStorageReconciler) createServers(ctx context.Context, persistentStorage *dwsv1alpha3.PersistentStorageInstance) (*dwsv1alpha3.Servers, error) { log := r.Log.WithValues("PersistentStorage", client.ObjectKeyFromObject(persistentStorage)) - server := &dwsv1alpha2.Servers{ + server := &dwsv1alpha3.Servers{ ObjectMeta: metav1.ObjectMeta{ Name: persistentStorage.Name, Namespace: persistentStorage.Namespace, @@ -231,8 +231,8 @@ func (r *PersistentStorageReconciler) createServers(ctx context.Context, persist // Create the Servers resource with owner labels and PersistentStorage labels result, err := ctrl.CreateOrUpdate(ctx, r.Client, server, func() error { - dwsv1alpha2.AddOwnerLabels(server, persistentStorage) - 
dwsv1alpha2.AddPersistentStorageLabels(server, persistentStorage) + dwsv1alpha3.AddOwnerLabels(server, persistentStorage) + dwsv1alpha3.AddPersistentStorageLabels(server, persistentStorage) return ctrl.SetControllerReference(persistentStorage, server, r.Scheme) }) @@ -260,17 +260,17 @@ func (r *PersistentStorageReconciler) createServers(ctx context.Context, persist // SetupWithManager sets up the controller with the Manager. func (r *PersistentStorageReconciler) SetupWithManager(mgr ctrl.Manager) error { - r.ChildObjects = []dwsv1alpha2.ObjectList{ + r.ChildObjects = []dwsv1alpha3.ObjectList{ &nnfv1alpha3.NnfStorageList{}, - &dwsv1alpha2.ServersList{}, + &dwsv1alpha3.ServersList{}, &nnfv1alpha3.NnfStorageProfileList{}, } maxReconciles := runtime.GOMAXPROCS(0) return ctrl.NewControllerManagedBy(mgr). WithOptions(controller.Options{MaxConcurrentReconciles: maxReconciles}). - For(&dwsv1alpha2.PersistentStorageInstance{}). - Owns(&dwsv1alpha2.Servers{}). + For(&dwsv1alpha3.PersistentStorageInstance{}). + Owns(&dwsv1alpha3.Servers{}). Owns(&nnfv1alpha3.NnfStorage{}). Owns(&nnfv1alpha3.NnfStorageProfile{}). 
Complete(r) diff --git a/internal/controller/nnf_persistentstorageinstance_controller_test.go b/internal/controller/nnf_persistentstorageinstance_controller_test.go index 7d3e922d1..f15dd6910 100644 --- a/internal/controller/nnf_persistentstorageinstance_controller_test.go +++ b/internal/controller/nnf_persistentstorageinstance_controller_test.go @@ -29,7 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" ) @@ -53,27 +53,27 @@ var _ = Describe("PersistentStorage test", func() { It("Creates a PersistentStorageInstance", func() { By("Creating a PersistentStorageInstance") - persistentStorage := &dwsv1alpha2.PersistentStorageInstance{ + persistentStorage := &dwsv1alpha3.PersistentStorageInstance{ ObjectMeta: metav1.ObjectMeta{ Name: "persistent-test", Namespace: corev1.NamespaceDefault, }, - Spec: dwsv1alpha2.PersistentStorageInstanceSpec{ + Spec: dwsv1alpha3.PersistentStorageInstanceSpec{ Name: "persistent_lustre", DWDirective: "#DW create_persistent name=persistent_lustre type=lustre capacity=1GiB", FsType: "lustre", UserID: 999, - State: dwsv1alpha2.PSIStateActive, + State: dwsv1alpha3.PSIStateActive, }, } Expect(k8sClient.Create(context.TODO(), persistentStorage)).To(Succeed()) - Eventually(func(g Gomega) dwsv1alpha2.PersistentStorageInstanceState { + Eventually(func(g Gomega) dwsv1alpha3.PersistentStorageInstanceState { g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(persistentStorage), persistentStorage)).To(Succeed()) return persistentStorage.Status.State - }).Should(Equal(dwsv1alpha2.PSIStateCreating)) + }).Should(Equal(dwsv1alpha3.PSIStateCreating)) - servers := &dwsv1alpha2.Servers{ + servers := &dwsv1alpha3.Servers{ ObjectMeta: metav1.ObjectMeta{ Name: persistentStorage.GetName(), Namespace: 
persistentStorage.GetNamespace(), @@ -107,15 +107,15 @@ var _ = Describe("PersistentStorage test", func() { By("Marking the persistentStorage as destroying") Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(persistentStorage), persistentStorage)).To(Succeed()) - persistentStorage.Spec.State = dwsv1alpha2.PSIStateDestroying + persistentStorage.Spec.State = dwsv1alpha3.PSIStateDestroying return k8sClient.Update(context.TODO(), persistentStorage) }).Should(Succeed(), "Set as destroying") - Eventually(func(g Gomega) dwsv1alpha2.PersistentStorageInstanceState { + Eventually(func(g Gomega) dwsv1alpha3.PersistentStorageInstanceState { g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(persistentStorage), persistentStorage)).To(Succeed()) return persistentStorage.Status.State - }).Should(Equal(dwsv1alpha2.PSIStateCreating)) + }).Should(Equal(dwsv1alpha3.PSIStateCreating)) By("Removing consumer reference") Eventually(func(g Gomega) error { @@ -125,10 +125,10 @@ var _ = Describe("PersistentStorage test", func() { return k8sClient.Update(context.TODO(), persistentStorage) }).Should(Succeed(), "Remove fake consumer reference") - Eventually(func(g Gomega) dwsv1alpha2.PersistentStorageInstanceState { + Eventually(func(g Gomega) dwsv1alpha3.PersistentStorageInstanceState { g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(persistentStorage), persistentStorage)).To(Succeed()) return persistentStorage.Status.State - }).Should(Equal(dwsv1alpha2.PSIStateDestroying)) + }).Should(Equal(dwsv1alpha3.PSIStateDestroying)) By("Deleting the PersistentStorageInstance") Expect(k8sClient.Delete(context.TODO(), persistentStorage)).To(Succeed()) diff --git a/internal/controller/nnf_port_manager_controller.go b/internal/controller/nnf_port_manager_controller.go index f31333a8c..52978e073 100644 --- a/internal/controller/nnf_port_manager_controller.go +++ b/internal/controller/nnf_port_manager_controller.go @@ -35,7 
+35,7 @@ import ( "github.com/DataWorkflowServices/dws/utils/updater" "github.com/go-logr/logr" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" ) @@ -75,7 +75,7 @@ func (r *NnfPortManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() // Read in the system configuration which contains the available ports. - config := &dwsv1alpha2.SystemConfiguration{ + config := &dwsv1alpha3.SystemConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: mgr.Spec.SystemConfiguration.Name, Namespace: mgr.Spec.SystemConfiguration.Namespace, @@ -236,7 +236,7 @@ func (r *NnfPortManagerReconciler) isAllocated(mgr *nnfv1alpha3.NnfPortManager, } // Find free ports to satisfy the provided specification. -func (r *NnfPortManagerReconciler) findFreePorts(log logr.Logger, mgr *nnfv1alpha3.NnfPortManager, config *dwsv1alpha2.SystemConfiguration, spec AllocationSpec) ([]uint16, nnfv1alpha3.NnfPortManagerAllocationStatusStatus) { +func (r *NnfPortManagerReconciler) findFreePorts(log logr.Logger, mgr *nnfv1alpha3.NnfPortManager, config *dwsv1alpha3.SystemConfiguration, spec AllocationSpec) ([]uint16, nnfv1alpha3.NnfPortManagerAllocationStatusStatus) { portsInUse := make([]uint16, 0) for _, status := range mgr.Status.Allocations { diff --git a/internal/controller/nnf_port_manager_controller_test.go b/internal/controller/nnf_port_manager_controller_test.go index c5508fa3c..7bca417ca 100644 --- a/internal/controller/nnf_port_manager_controller_test.go +++ b/internal/controller/nnf_port_manager_controller_test.go @@ -33,7 +33,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" nnfv1alpha3 
"github.com/NearNodeFlash/nnf-sos/api/v1alpha3" ) @@ -46,17 +46,17 @@ var _ = Context("NNF Port Manager Controller Setup", Ordered, func() { portTotal := portEnd - portStart + 1 Describe("NNF Port Manager Controller Test", func() { - var cfg *dwsv1alpha2.SystemConfiguration + var cfg *dwsv1alpha3.SystemConfiguration var mgr *nnfv1alpha3.NnfPortManager portCooldown := 1 JustBeforeEach(func() { - cfg = &dwsv1alpha2.SystemConfiguration{ + cfg = &dwsv1alpha3.SystemConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: "default", Namespace: corev1.NamespaceDefault, }, - Spec: dwsv1alpha2.SystemConfigurationSpec{ + Spec: dwsv1alpha3.SystemConfigurationSpec{ Ports: []intstr.IntOrString{ intstr.FromString(fmt.Sprintf("%d-%d", portStart, portEnd)), }, diff --git a/internal/controller/nnf_storage_controller.go b/internal/controller/nnf_storage_controller.go index 360ae828b..8fd69c2f3 100644 --- a/internal/controller/nnf_storage_controller.go +++ b/internal/controller/nnf_storage_controller.go @@ -42,7 +42,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" "github.com/DataWorkflowServices/dws/utils/updater" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" @@ -53,7 +53,7 @@ type NnfStorageReconciler struct { client.Client Log logr.Logger Scheme *kruntime.Scheme - ChildObjects []dwsv1alpha2.ObjectList + ChildObjects []dwsv1alpha3.ObjectList } const ( @@ -257,7 +257,7 @@ func (r *NnfStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) } func (r *NnfStorageReconciler) addPersistentStorageReference(ctx context.Context, nnfStorage *nnfv1alpha3.NnfStorage, persistentMgsReference corev1.ObjectReference) error { - persistentStorage := &dwsv1alpha2.PersistentStorageInstance{ + persistentStorage := 
&dwsv1alpha3.PersistentStorageInstance{ ObjectMeta: metav1.ObjectMeta{ Name: persistentMgsReference.Name, Namespace: persistentMgsReference.Namespace, @@ -265,11 +265,11 @@ func (r *NnfStorageReconciler) addPersistentStorageReference(ctx context.Context } if err := r.Get(ctx, client.ObjectKeyFromObject(persistentStorage), persistentStorage); err != nil { - return dwsv1alpha2.NewResourceError("").WithUserMessage("PersistentStorage '%v' not found", client.ObjectKeyFromObject(persistentStorage)).WithMajor() + return dwsv1alpha3.NewResourceError("").WithUserMessage("PersistentStorage '%v' not found", client.ObjectKeyFromObject(persistentStorage)).WithMajor() } - if persistentStorage.Status.State != dwsv1alpha2.PSIStateActive { - return dwsv1alpha2.NewResourceError("").WithUserMessage("PersistentStorage is not active").WithFatal() + if persistentStorage.Status.State != dwsv1alpha3.PSIStateActive { + return dwsv1alpha3.NewResourceError("").WithUserMessage("PersistentStorage is not active").WithFatal() } // Add a consumer reference to the persistent storage for this directive @@ -291,7 +291,7 @@ func (r *NnfStorageReconciler) addPersistentStorageReference(ctx context.Context } func (r *NnfStorageReconciler) removePersistentStorageReference(ctx context.Context, nnfStorage *nnfv1alpha3.NnfStorage, persistentMgsReference corev1.ObjectReference) error { - persistentStorage := &dwsv1alpha2.PersistentStorageInstance{ + persistentStorage := &dwsv1alpha3.PersistentStorageInstance{ ObjectMeta: metav1.ObjectMeta{ Name: persistentMgsReference.Name, Namespace: persistentMgsReference.Namespace, @@ -336,8 +336,8 @@ func (r *NnfStorageReconciler) createNodeBlockStorage(ctx context.Context, nnfSt result, err := ctrl.CreateOrUpdate(ctx, r.Client, nnfNodeBlockStorage, func() error { - dwsv1alpha2.InheritParentLabels(nnfNodeBlockStorage, nnfStorage) - dwsv1alpha2.AddOwnerLabels(nnfNodeBlockStorage, nnfStorage) + dwsv1alpha3.InheritParentLabels(nnfNodeBlockStorage, nnfStorage) + 
dwsv1alpha3.AddOwnerLabels(nnfNodeBlockStorage, nnfStorage) labels := nnfNodeBlockStorage.GetLabels() labels[nnfv1alpha3.AllocationSetLabel] = allocationSet.Name @@ -354,7 +354,7 @@ func (r *NnfStorageReconciler) createNodeBlockStorage(ctx context.Context, nnfSt } if len(nnfNodeBlockStorage.Spec.Allocations) != expectedAllocations { - return dwsv1alpha2.NewResourceError("block storage allocation count incorrect. found %v, expected %v", len(nnfNodeBlockStorage.Spec.Allocations), expectedAllocations).WithFatal() + return dwsv1alpha3.NewResourceError("block storage allocation count incorrect. found %v, expected %v", len(nnfNodeBlockStorage.Spec.Allocations), expectedAllocations).WithFatal() } for i := range nnfNodeBlockStorage.Spec.Allocations { @@ -407,7 +407,7 @@ func (r *NnfStorageReconciler) aggregateNodeBlockStorageStatus(ctx context.Conte allocationSet.AllocationCount = 0 nnfNodeBlockStorageList := &nnfv1alpha3.NnfNodeBlockStorageList{} - matchLabels := dwsv1alpha2.MatchingOwner(nnfStorage) + matchLabels := dwsv1alpha3.MatchingOwner(nnfStorage) matchLabels[nnfv1alpha3.AllocationSetLabel] = nnfStorage.Spec.AllocationSets[allocationSetIndex].Name listOptions := []client.ListOption{ @@ -415,7 +415,7 @@ func (r *NnfStorageReconciler) aggregateNodeBlockStorageStatus(ctx context.Conte } if err := r.List(ctx, nnfNodeBlockStorageList, listOptions...); err != nil { - return &ctrl.Result{}, dwsv1alpha2.NewResourceError("could not list NnfNodeBlockStorages").WithError(err) + return &ctrl.Result{}, dwsv1alpha3.NewResourceError("could not list NnfNodeBlockStorages").WithError(err) } // make a map with empty data of the Rabbit names to allow easy searching @@ -443,7 +443,7 @@ func (r *NnfStorageReconciler) aggregateNodeBlockStorageStatus(ctx context.Conte for _, nnfNodeBlockStorage := range nnfNodeBlockStorages { if nnfNodeBlockStorage.Status.Error != nil { - return &ctrl.Result{}, dwsv1alpha2.NewResourceError("Node: %s", 
nnfNodeBlockStorage.GetNamespace()).WithError(nnfNodeBlockStorage.Status.Error) + return &ctrl.Result{}, dwsv1alpha3.NewResourceError("Node: %s", nnfNodeBlockStorage.GetNamespace()).WithError(nnfNodeBlockStorage.Status.Error) } } @@ -462,7 +462,7 @@ func (r *NnfStorageReconciler) aggregateNodeBlockStorageStatus(ctx context.Conte } if nnfNodeBlockStorage.GetCreationTimestamp().Add(time.Duration(time.Duration(childTimeout) * time.Second)).Before(time.Now()) { - return &ctrl.Result{}, dwsv1alpha2.NewResourceError("Node: %s: NnfNodeBlockStorage has not been reconciled after %d seconds", nnfNodeBlockStorage.GetNamespace(), childTimeout).WithMajor() + return &ctrl.Result{}, dwsv1alpha3.NewResourceError("Node: %s: NnfNodeBlockStorage has not been reconciled after %d seconds", nnfNodeBlockStorage.GetNamespace(), childTimeout).WithMajor() } return &ctrl.Result{RequeueAfter: time.Minute}, nil @@ -522,7 +522,7 @@ func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *n } if err := r.Get(ctx, client.ObjectKeyFromObject(nnfNode), nnfNode); err != nil { - return &ctrl.Result{}, dwsv1alpha2.NewResourceError("could not get NnfNode: %v", client.ObjectKeyFromObject(nnfNode)).WithError(err) + return &ctrl.Result{}, dwsv1alpha3.NewResourceError("could not get NnfNode: %v", client.ObjectKeyFromObject(nnfNode)).WithError(err) } mgsAddress = nnfNode.Status.LNetNid @@ -546,11 +546,11 @@ func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *n }, } - dwsv1alpha2.InheritParentLabels(nnfLustreMgt, storage) - dwsv1alpha2.AddOwnerLabels(nnfLustreMgt, storage) + dwsv1alpha3.InheritParentLabels(nnfLustreMgt, storage) + dwsv1alpha3.AddOwnerLabels(nnfLustreMgt, storage) if err := r.Create(ctx, nnfLustreMgt); err != nil { if !apierrors.IsAlreadyExists(err) { - return nil, dwsv1alpha2.NewResourceError("could not create NnfLustreMGT").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not create 
NnfLustreMGT").WithError(err).WithMajor() } } else { log.Info("Created NnfLustreMGT", "Name", nnfLustreMgt.Name, "Namespace", nnfLustreMgt.Namespace) @@ -562,7 +562,7 @@ func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *n if fsname == "" && !(len(storage.Spec.AllocationSets) == 1 && storage.Spec.AllocationSets[0].Name == "mgt") { fsname, err := r.getFsName(ctx, storage) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not get available fsname").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not get available fsname").WithError(err).WithMajor() } if fsname == "" { return &ctrl.Result{Requeue: true}, nil @@ -585,8 +585,8 @@ func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *n result, err := ctrl.CreateOrUpdate(ctx, r.Client, nnfNodeStorage, func() error { - dwsv1alpha2.InheritParentLabels(nnfNodeStorage, storage) - dwsv1alpha2.AddOwnerLabels(nnfNodeStorage, storage) + dwsv1alpha3.InheritParentLabels(nnfNodeStorage, storage) + dwsv1alpha3.AddOwnerLabels(nnfNodeStorage, storage) labels := nnfNodeStorage.GetLabels() labels[nnfv1alpha3.AllocationSetLabel] = allocationSet.Name @@ -646,7 +646,7 @@ func (r *NnfStorageReconciler) aggregateNodeStorageStatus(ctx context.Context, s log := r.Log.WithValues("NnfStorage", types.NamespacedName{Name: storage.Name, Namespace: storage.Namespace}) nnfNodeStorageList := &nnfv1alpha3.NnfNodeStorageList{} - matchLabels := dwsv1alpha2.MatchingOwner(storage) + matchLabels := dwsv1alpha3.MatchingOwner(storage) matchLabels[nnfv1alpha3.AllocationSetLabel] = storage.Spec.AllocationSets[allocationSetIndex].Name listOptions := []client.ListOption{ @@ -654,7 +654,7 @@ func (r *NnfStorageReconciler) aggregateNodeStorageStatus(ctx context.Context, s } if err := r.List(ctx, nnfNodeStorageList, listOptions...); err != nil { - return &ctrl.Result{}, dwsv1alpha2.NewResourceError("could not list NnfNodeStorages").WithError(err) + return &ctrl.Result{}, 
dwsv1alpha3.NewResourceError("could not list NnfNodeStorages").WithError(err) } // make a map with empty data of the Rabbit names to allow easy searching @@ -679,7 +679,7 @@ func (r *NnfStorageReconciler) aggregateNodeStorageStatus(ctx context.Context, s continue } if nnfNodeStorage.Status.Error != nil { - return &ctrl.Result{}, dwsv1alpha2.NewResourceError("Node: %s", nnfNodeStorage.GetNamespace()).WithError(nnfNodeStorage.Status.Error) + return &ctrl.Result{}, dwsv1alpha3.NewResourceError("Node: %s", nnfNodeStorage.GetNamespace()).WithError(nnfNodeStorage.Status.Error) } } @@ -698,7 +698,7 @@ func (r *NnfStorageReconciler) aggregateNodeStorageStatus(ctx context.Context, s } if nnfNodeStorage.GetCreationTimestamp().Add(time.Duration(time.Duration(childTimeout) * time.Second)).Before(time.Now()) { - return &ctrl.Result{}, dwsv1alpha2.NewResourceError("Node: %s: NnfNodeStorage has not been reconciled after %d seconds", nnfNodeStorage.GetNamespace(), childTimeout).WithMajor() + return &ctrl.Result{}, dwsv1alpha3.NewResourceError("Node: %s: NnfNodeStorage has not been reconciled after %d seconds", nnfNodeStorage.GetNamespace(), childTimeout).WithMajor() } return &ctrl.Result{RequeueAfter: time.Minute}, nil @@ -736,7 +736,7 @@ func (r *NnfStorageReconciler) getLustreMgt(ctx context.Context, nnfStorage *nnf } if err := r.Get(ctx, client.ObjectKeyFromObject(nnfLustreMgt), nnfLustreMgt); err != nil { - return nil, dwsv1alpha2.NewResourceError("could not get nnfLustreMgt: %v", client.ObjectKeyFromObject(nnfLustreMgt)).WithError(err) + return nil, dwsv1alpha3.NewResourceError("could not get nnfLustreMgt: %v", client.ObjectKeyFromObject(nnfLustreMgt)).WithError(err) } return nnfLustreMgt, nil @@ -744,7 +744,7 @@ func (r *NnfStorageReconciler) getLustreMgt(ctx context.Context, nnfStorage *nnf nnfLustreMgtList := &nnfv1alpha3.NnfLustreMGTList{} if err := r.List(ctx, nnfLustreMgtList, []client.ListOption{}...); err != nil { - return nil, dwsv1alpha2.NewResourceError("could not 
list NnfLustreMGTs").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not list NnfLustreMGTs").WithError(err).WithMajor() } var nnfLustreMgt *nnfv1alpha3.NnfLustreMGT = nil @@ -761,14 +761,14 @@ func (r *NnfStorageReconciler) getLustreMgt(ctx context.Context, nnfStorage *nnf } if nnfLustreMgt != nil { - return nil, dwsv1alpha2.NewResourceError("multiple MGTs found for address %s", nnfStorage.Status.MgsAddress).WithFatal().WithWLM() + return nil, dwsv1alpha3.NewResourceError("multiple MGTs found for address %s", nnfStorage.Status.MgsAddress).WithFatal().WithWLM() } nnfLustreMgt = &nnfLustreMgtList.Items[i] } if nnfLustreMgt == nil { - return nil, dwsv1alpha2.NewResourceError("").WithUserMessage("no NnfLustreMGT resource found for MGS address: %s", nnfStorage.Status.MgsAddress).WithMajor() + return nil, dwsv1alpha3.NewResourceError("").WithUserMessage("no NnfLustreMGT resource found for MGS address: %s", nnfStorage.Status.MgsAddress).WithMajor() } return nnfLustreMgt, nil @@ -777,7 +777,7 @@ func (r *NnfStorageReconciler) getLustreMgt(ctx context.Context, nnfStorage *nnf func (r *NnfStorageReconciler) getFsName(ctx context.Context, nnfStorage *nnfv1alpha3.NnfStorage) (string, error) { nnfLustreMgt, err := r.getLustreMgt(ctx, nnfStorage) if err != nil { - return "", dwsv1alpha2.NewResourceError("could not get NnfLustreMGT for address: %s", nnfStorage.Status.MgsAddress).WithError(err) + return "", dwsv1alpha3.NewResourceError("could not get NnfLustreMGT for address: %s", nnfStorage.Status.MgsAddress).WithError(err) } // Save the reference to the NnfLustreMGT resource in the NnfStorage before adding an fsname claim @@ -819,7 +819,7 @@ func (r *NnfStorageReconciler) getFsName(ctx context.Context, nnfStorage *nnfv1a return "", nil } - return "", dwsv1alpha2.NewResourceError("could not update NnfLustreMGT").WithError(err).WithMajor() + return "", dwsv1alpha3.NewResourceError("could not update NnfLustreMGT").WithError(err).WithMajor() } return 
"", nil @@ -841,7 +841,7 @@ func (r *NnfStorageReconciler) setLustreOwnerGroup(ctx context.Context, nnfStora } if nnfStorage.Spec.FileSystemType != "lustre" { - return &ctrl.Result{}, dwsv1alpha2.NewResourceError("invalid file system type '%s' for setLustreOwnerGroup", nnfStorage.Spec.FileSystemType).WithFatal() + return &ctrl.Result{}, dwsv1alpha3.NewResourceError("invalid file system type '%s' for setLustreOwnerGroup", nnfStorage.Spec.FileSystemType).WithFatal() } // If this NnfStorage is for a standalone MGT, then we don't need to set the owner and group @@ -859,12 +859,12 @@ func (r *NnfStorageReconciler) setLustreOwnerGroup(ctx context.Context, nnfStora }() if index == -1 { - return &ctrl.Result{}, dwsv1alpha2.NewResourceError("no ost allocation set").WithFatal() + return &ctrl.Result{}, dwsv1alpha3.NewResourceError("no ost allocation set").WithFatal() } allocationSet := nnfStorage.Spec.AllocationSets[index] if len(allocationSet.Nodes) == 0 { - return &ctrl.Result{}, dwsv1alpha2.NewResourceError("zero length node array for OST").WithFatal() + return &ctrl.Result{}, dwsv1alpha3.NewResourceError("zero length node array for OST").WithFatal() } tempMountDir := os.Getenv("NNF_TEMP_MOUNT_PATH") @@ -872,7 +872,7 @@ func (r *NnfStorageReconciler) setLustreOwnerGroup(ctx context.Context, nnfStora tempMountDir = "/mnt/tmp/" } - clientMount := &dwsv1alpha2.ClientMount{ + clientMount := &dwsv1alpha3.ClientMount{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-ownergroup", nnfStorage.Name), Namespace: allocationSet.Nodes[0].Name, @@ -881,7 +881,7 @@ func (r *NnfStorageReconciler) setLustreOwnerGroup(ctx context.Context, nnfStora if err := r.Get(ctx, client.ObjectKeyFromObject(clientMount), clientMount); err != nil { if !apierrors.IsNotFound(err) { - return &ctrl.Result{}, dwsv1alpha2.NewResourceError("could not get clientmount for setting lustre owner/group").WithError(err).WithMajor() + return &ctrl.Result{}, dwsv1alpha3.NewResourceError("could not get clientmount 
for setting lustre owner/group").WithError(err).WithMajor() } index := func() int { for i, allocationSet := range nnfStorage.Spec.AllocationSets { @@ -893,12 +893,12 @@ func (r *NnfStorageReconciler) setLustreOwnerGroup(ctx context.Context, nnfStora }() if index == -1 { - return &ctrl.Result{}, dwsv1alpha2.NewResourceError("no ost allocation set").WithFatal() + return &ctrl.Result{}, dwsv1alpha3.NewResourceError("no ost allocation set").WithFatal() } allocationSet := nnfStorage.Spec.AllocationSets[index] if len(allocationSet.Nodes) == 0 { - return &ctrl.Result{}, dwsv1alpha2.NewResourceError("zero length node array for OST").WithFatal() + return &ctrl.Result{}, dwsv1alpha3.NewResourceError("zero length node array for OST").WithFatal() } tempMountDir := os.Getenv("NNF_TEMP_MOUNT_PATH") @@ -906,23 +906,23 @@ func (r *NnfStorageReconciler) setLustreOwnerGroup(ctx context.Context, nnfStora tempMountDir = "/mnt/tmp/" } - dwsv1alpha2.InheritParentLabels(clientMount, nnfStorage) - dwsv1alpha2.AddOwnerLabels(clientMount, nnfStorage) + dwsv1alpha3.InheritParentLabels(clientMount, nnfStorage) + dwsv1alpha3.AddOwnerLabels(clientMount, nnfStorage) clientMount.Spec.Node = allocationSet.Nodes[0].Name - clientMount.Spec.DesiredState = dwsv1alpha2.ClientMountStateMounted - clientMount.Spec.Mounts = []dwsv1alpha2.ClientMountInfo{ - dwsv1alpha2.ClientMountInfo{ + clientMount.Spec.DesiredState = dwsv1alpha3.ClientMountStateMounted + clientMount.Spec.Mounts = []dwsv1alpha3.ClientMountInfo{ + dwsv1alpha3.ClientMountInfo{ Type: nnfStorage.Spec.FileSystemType, TargetType: "directory", MountPath: fmt.Sprintf("/%s/%s", tempMountDir, nnfNodeStorageName(nnfStorage, index, 0)), - Device: dwsv1alpha2.ClientMountDevice{ - Type: dwsv1alpha2.ClientMountDeviceTypeLustre, - Lustre: &dwsv1alpha2.ClientMountDeviceLustre{ + Device: dwsv1alpha3.ClientMountDevice{ + Type: dwsv1alpha3.ClientMountDeviceTypeLustre, + Lustre: &dwsv1alpha3.ClientMountDeviceLustre{ FileSystemName: 
nnfStorage.Status.FileSystemName, MgsAddresses: nnfStorage.Status.MgsAddress, }, - DeviceReference: &dwsv1alpha2.ClientMountDeviceReference{ + DeviceReference: &dwsv1alpha3.ClientMountDeviceReference{ ObjectReference: corev1.ObjectReference{ Name: nnfNodeStorageName(nnfStorage, index, 0), Namespace: allocationSet.Nodes[0].Name, @@ -937,7 +937,7 @@ func (r *NnfStorageReconciler) setLustreOwnerGroup(ctx context.Context, nnfStora } if err := r.Create(ctx, clientMount); err != nil { - return &ctrl.Result{}, dwsv1alpha2.NewResourceError("could not create lustre owner/group ClientMount resource").WithError(err).WithMajor() + return &ctrl.Result{}, dwsv1alpha3.NewResourceError("could not create lustre owner/group ClientMount resource").WithError(err).WithMajor() } log.Info("Created clientMount for setting Lustre owner/group") @@ -946,7 +946,7 @@ func (r *NnfStorageReconciler) setLustreOwnerGroup(ctx context.Context, nnfStora } if clientMount.Status.Error != nil { - return &ctrl.Result{}, dwsv1alpha2.NewResourceError("Node: %s", clientMount.GetNamespace()).WithError(clientMount.Status.Error) + return &ctrl.Result{}, dwsv1alpha3.NewResourceError("Node: %s", clientMount.GetNamespace()).WithError(clientMount.Status.Error) } if len(clientMount.Status.Mounts) == 0 { @@ -954,12 +954,12 @@ func (r *NnfStorageReconciler) setLustreOwnerGroup(ctx context.Context, nnfStora } switch clientMount.Status.Mounts[0].State { - case dwsv1alpha2.ClientMountStateMounted: + case dwsv1alpha3.ClientMountStateMounted: if clientMount.Status.Mounts[0].Ready == false { return &ctrl.Result{}, nil } - clientMount.Spec.DesiredState = dwsv1alpha2.ClientMountStateUnmounted + clientMount.Spec.DesiredState = dwsv1alpha3.ClientMountStateUnmounted if err := r.Update(ctx, clientMount); err != nil { if !apierrors.IsConflict(err) { return &ctrl.Result{}, err @@ -971,7 +971,7 @@ func (r *NnfStorageReconciler) setLustreOwnerGroup(ctx context.Context, nnfStora log.Info("Updated clientMount to unmount Lustre 
owner/group mount") return &ctrl.Result{}, nil - case dwsv1alpha2.ClientMountStateUnmounted: + case dwsv1alpha3.ClientMountStateUnmounted: if clientMount.Status.Mounts[0].Ready == false { return &ctrl.Result{}, nil } @@ -986,15 +986,15 @@ func (r *NnfStorageReconciler) setLustreOwnerGroup(ctx context.Context, nnfStora // Get the status from all the child NnfNodeStorage resources and use them to build the status // for the NnfStorage. func (r *NnfStorageReconciler) aggregateClientMountStatus(ctx context.Context, storage *nnfv1alpha3.NnfStorage, deleting bool) error { - clientMountList := &dwsv1alpha2.ClientMountList{} - matchLabels := dwsv1alpha2.MatchingOwner(storage) + clientMountList := &dwsv1alpha3.ClientMountList{} + matchLabels := dwsv1alpha3.MatchingOwner(storage) listOptions := []client.ListOption{ matchLabels, } if err := r.List(ctx, clientMountList, listOptions...); err != nil { - return dwsv1alpha2.NewResourceError("could not list ClientMounts").WithError(err) + return dwsv1alpha3.NewResourceError("could not list ClientMounts").WithError(err) } for _, clientMount := range clientMountList.Items { @@ -1004,7 +1004,7 @@ func (r *NnfStorageReconciler) aggregateClientMountStatus(ctx context.Context, s continue } if clientMount.Status.Error != nil { - return dwsv1alpha2.NewResourceError("Node: %s", clientMount.GetNamespace()).WithError(clientMount.Status.Error) + return dwsv1alpha3.NewResourceError("Node: %s", clientMount.GetNamespace()).WithError(clientMount.Status.Error) } } @@ -1017,7 +1017,7 @@ func (r *NnfStorageReconciler) aggregateClientMountStatus(ctx context.Context, s // to the NnfStorage. func (r *NnfStorageReconciler) teardownStorage(ctx context.Context, storage *nnfv1alpha3.NnfStorage) (nodeStoragesState, error) { // Delete any clientmounts that were created by the NnfStorage. 
- deleteStatus, err := dwsv1alpha2.DeleteChildren(ctx, r.Client, []dwsv1alpha2.ObjectList{&dwsv1alpha2.ClientMountList{}}, storage) + deleteStatus, err := dwsv1alpha3.DeleteChildren(ctx, r.Client, []dwsv1alpha3.ObjectList{&dwsv1alpha3.ClientMountList{}}, storage) if err != nil { return nodeStoragesExist, err } @@ -1033,16 +1033,16 @@ func (r *NnfStorageReconciler) teardownStorage(ctx context.Context, storage *nnf if storage.Spec.FileSystemType == "lustre" { // Delete the OSTs and MDTs first so we can drop the claim on the NnfLustreMgt resource. This will trigger // an lctl command to run to remove the fsname from the MGT. - childObjects := []dwsv1alpha2.ObjectList{ + childObjects := []dwsv1alpha3.ObjectList{ &nnfv1alpha3.NnfNodeStorageList{}, } - ostDeleteStatus, err := dwsv1alpha2.DeleteChildrenWithLabels(ctx, r.Client, childObjects, storage, client.MatchingLabels{nnfv1alpha3.AllocationSetLabel: "ost"}) + ostDeleteStatus, err := dwsv1alpha3.DeleteChildrenWithLabels(ctx, r.Client, childObjects, storage, client.MatchingLabels{nnfv1alpha3.AllocationSetLabel: "ost"}) if err != nil { return nodeStoragesExist, err } - mdtDeleteStatus, err := dwsv1alpha2.DeleteChildrenWithLabels(ctx, r.Client, childObjects, storage, client.MatchingLabels{nnfv1alpha3.AllocationSetLabel: "mdt"}) + mdtDeleteStatus, err := dwsv1alpha3.DeleteChildrenWithLabels(ctx, r.Client, childObjects, storage, client.MatchingLabels{nnfv1alpha3.AllocationSetLabel: "mdt"}) if err != nil { return nodeStoragesExist, err } @@ -1064,7 +1064,7 @@ func (r *NnfStorageReconciler) teardownStorage(ctx context.Context, storage *nnf // since this may be an MGT made as part of a jobdw released, err := r.releaseLustreMgt(ctx, storage) if err != nil { - return nodeStoragesExist, dwsv1alpha2.NewResourceError("could not release LustreMGT resource").WithError(err) + return nodeStoragesExist, dwsv1alpha3.NewResourceError("could not release LustreMGT resource").WithError(err) } if !released { @@ -1083,7 +1083,7 @@ func (r 
*NnfStorageReconciler) teardownStorage(ctx context.Context, storage *nnf } // Delete any remaining child objects including the MGT allocation set for Lustre - deleteStatus, err = dwsv1alpha2.DeleteChildren(ctx, r.Client, r.ChildObjects, storage) + deleteStatus, err = dwsv1alpha3.DeleteChildren(ctx, r.Client, r.ChildObjects, storage) if err != nil { return nodeStoragesExist, err } @@ -1127,7 +1127,7 @@ func (r *NnfStorageReconciler) releaseLustreMgt(ctx context.Context, storage *nn return true, nil } - return false, dwsv1alpha2.NewResourceError("could not get nnfLustreMgt: %v", client.ObjectKeyFromObject(nnfLustreMgt)).WithError(err) + return false, dwsv1alpha3.NewResourceError("could not get nnfLustreMgt: %v", client.ObjectKeyFromObject(nnfLustreMgt)).WithError(err) } // Remove our claim from the spec section. @@ -1136,7 +1136,7 @@ func (r *NnfStorageReconciler) releaseLustreMgt(ctx context.Context, storage *nn nnfLustreMgt.Spec.ClaimList = append(nnfLustreMgt.Spec.ClaimList[:i], nnfLustreMgt.Spec.ClaimList[i+1:]...) if err := r.Update(ctx, nnfLustreMgt); err != nil { - return false, dwsv1alpha2.NewResourceError("could not remove reference from nnfLustreMgt: %v", client.ObjectKeyFromObject(nnfLustreMgt)).WithError(err) + return false, dwsv1alpha3.NewResourceError("could not remove reference from nnfLustreMgt: %v", client.ObjectKeyFromObject(nnfLustreMgt)).WithError(err) } return false, nil @@ -1178,8 +1178,8 @@ func nnfNodeStorageName(storage *nnfv1alpha3.NnfStorage, allocationSetIndex int, // SetupWithManager sets up the controller with the Manager. 
func (r *NnfStorageReconciler) SetupWithManager(mgr ctrl.Manager) error { - r.ChildObjects = []dwsv1alpha2.ObjectList{ - &dwsv1alpha2.ClientMountList{}, + r.ChildObjects = []dwsv1alpha3.ObjectList{ + &dwsv1alpha3.ClientMountList{}, &nnfv1alpha3.NnfNodeStorageList{}, &nnfv1alpha3.NnfNodeBlockStorageList{}, &nnfv1alpha3.NnfLustreMGTList{}, @@ -1190,8 +1190,8 @@ func (r *NnfStorageReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). WithOptions(controller.Options{MaxConcurrentReconciles: maxReconciles}). For(&nnfv1alpha3.NnfStorage{}). - Watches(&nnfv1alpha3.NnfNodeStorage{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). - Watches(&nnfv1alpha3.NnfNodeBlockStorage{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). - Watches(&dwsv1alpha2.ClientMount{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). + Watches(&nnfv1alpha3.NnfNodeStorage{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha3.OwnerLabelMapFunc)). + Watches(&nnfv1alpha3.NnfNodeBlockStorage{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha3.OwnerLabelMapFunc)). + Watches(&dwsv1alpha3.ClientMount{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha3.OwnerLabelMapFunc)). 
Complete(r) } diff --git a/internal/controller/nnf_systemconfiguration_controller.go b/internal/controller/nnf_systemconfiguration_controller.go index 566307ee8..5d916d69b 100644 --- a/internal/controller/nnf_systemconfiguration_controller.go +++ b/internal/controller/nnf_systemconfiguration_controller.go @@ -36,7 +36,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" "github.com/DataWorkflowServices/dws/utils/updater" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" @@ -67,7 +67,7 @@ type NnfSystemConfigurationReconciler struct { func (r *NnfSystemConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, err error) { metrics.NnfSystemConfigurationReconcilesTotal.Inc() - systemConfiguration := &dwsv1alpha2.SystemConfiguration{} + systemConfiguration := &dwsv1alpha3.SystemConfiguration{} if err := r.Get(ctx, req.NamespacedName, systemConfiguration); err != nil { // ignore not-found errors, since they can't be fixed by an immediate // requeue (we'll need to wait for a new notification), and we can get them @@ -77,7 +77,7 @@ func (r *NnfSystemConfigurationReconciler) Reconcile(ctx context.Context, req ct // Create a status updater that handles the call to r.Status().Update() if any of the fields // in systemConfiguration.Status{} change - statusUpdater := updater.NewStatusUpdater[*dwsv1alpha2.SystemConfigurationStatus](systemConfiguration) + statusUpdater := updater.NewStatusUpdater[*dwsv1alpha3.SystemConfigurationStatus](systemConfiguration) defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() // Handle cleanup if the resource is being deleted @@ -147,7 +147,7 @@ func (r *NnfSystemConfigurationReconciler) Reconcile(ctx context.Context, req ct 
// createNamespaces creates a namespace for each entry in the validNamespaces map. The // namespaces have a "name" and "namespace" label for the SystemConfiguration owner. -func (r *NnfSystemConfigurationReconciler) createNamespaces(ctx context.Context, config *dwsv1alpha2.SystemConfiguration, validNamespaces map[string]struct{}) error { +func (r *NnfSystemConfigurationReconciler) createNamespaces(ctx context.Context, config *dwsv1alpha3.SystemConfiguration, validNamespaces map[string]struct{}) error { for name := range validNamespaces { namespace := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -156,7 +156,7 @@ func (r *NnfSystemConfigurationReconciler) createNamespaces(ctx context.Context, } _, err := ctrl.CreateOrUpdate(ctx, r.Client, namespace, func() error { - dwsv1alpha2.AddOwnerLabels(namespace, config) + dwsv1alpha3.AddOwnerLabels(namespace, config) return nil }) if err != nil { @@ -314,9 +314,9 @@ func (r *NnfSystemConfigurationReconciler) SetupWithManager(mgr ctrl.Manager) er builder := ctrl.NewControllerManagedBy(mgr). WithOptions(controller.Options{MaxConcurrentReconciles: 1}). - Watches(&corev1.Namespace{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). + Watches(&corev1.Namespace{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha3.OwnerLabelMapFunc)). Watches(&corev1.Node{}, handler.EnqueueRequestsFromMapFunc(nodeMapFunc)). 
- For(&dwsv1alpha2.SystemConfiguration{}) + For(&dwsv1alpha3.SystemConfiguration{}) return builder.Complete(r) } diff --git a/internal/controller/nnf_systemconfiguration_controller_test.go b/internal/controller/nnf_systemconfiguration_controller_test.go index a9d2e8868..a07f111d6 100644 --- a/internal/controller/nnf_systemconfiguration_controller_test.go +++ b/internal/controller/nnf_systemconfiguration_controller_test.go @@ -30,12 +30,12 @@ import ( "k8s.io/kubernetes/pkg/util/taints" "sigs.k8s.io/controller-runtime/pkg/client" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" ) var _ = Describe("NnfSystemconfigurationController", func() { - var sysCfg *dwsv1alpha2.SystemConfiguration + var sysCfg *dwsv1alpha3.SystemConfiguration AfterEach(func() { Expect(k8sClient.Delete(context.TODO(), sysCfg)).To(Succeed()) @@ -45,17 +45,17 @@ var _ = Describe("NnfSystemconfigurationController", func() { }) When("creating a SystemConfiguration", func() { - sysCfg = &dwsv1alpha2.SystemConfiguration{ + sysCfg = &dwsv1alpha3.SystemConfiguration{ ObjectMeta: v1.ObjectMeta{ Name: "default", Namespace: corev1.NamespaceDefault, }, - Spec: dwsv1alpha2.SystemConfigurationSpec{ - StorageNodes: []dwsv1alpha2.SystemConfigurationStorageNode{ + Spec: dwsv1alpha3.SystemConfigurationSpec{ + StorageNodes: []dwsv1alpha3.SystemConfigurationStorageNode{ { Type: "Rabbit", Name: "rabbit1", - ComputesAccess: []dwsv1alpha2.SystemConfigurationComputeNodeReference{ + ComputesAccess: []dwsv1alpha3.SystemConfigurationComputeNodeReference{ { Name: "test-compute-0", Index: 0, @@ -77,7 +77,7 @@ var _ = Describe("NnfSystemconfigurationController", func() { }) var _ = Describe("Adding taints and labels to nodes", func() { - var sysCfg *dwsv1alpha2.SystemConfiguration + var sysCfg *dwsv1alpha3.SystemConfiguration taintNoSchedule := &corev1.Taint{ Key: 
nnfv1alpha3.RabbitNodeTaintKey, @@ -108,13 +108,13 @@ var _ = Describe("Adding taints and labels to nodes", func() { } BeforeEach(func() { - sysCfg = &dwsv1alpha2.SystemConfiguration{ + sysCfg = &dwsv1alpha3.SystemConfiguration{ ObjectMeta: v1.ObjectMeta{ Name: "default", Namespace: corev1.NamespaceDefault, }, - Spec: dwsv1alpha2.SystemConfigurationSpec{ - StorageNodes: []dwsv1alpha2.SystemConfigurationStorageNode{ + Spec: dwsv1alpha3.SystemConfigurationSpec{ + StorageNodes: []dwsv1alpha3.SystemConfigurationStorageNode{ { Type: "Rabbit", Name: "rabbit1", diff --git a/internal/controller/nnf_workflow_controller.go b/internal/controller/nnf_workflow_controller.go index f2da5c31d..574f8e9e9 100644 --- a/internal/controller/nnf_workflow_controller.go +++ b/internal/controller/nnf_workflow_controller.go @@ -43,7 +43,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" "github.com/DataWorkflowServices/dws/utils/dwdparse" "github.com/DataWorkflowServices/dws/utils/updater" lusv1beta1 "github.com/NearNodeFlash/lustre-fs-operator/api/v1beta1" @@ -64,7 +64,7 @@ type NnfWorkflowReconciler struct { client.Client Log logr.Logger Scheme *kruntime.Scheme - ChildObjects []dwsv1alpha2.ObjectList + ChildObjects []dwsv1alpha3.ObjectList } //+kubebuilder:rbac:groups=dataworkflowservices.github.io,resources=workflows,verbs=get;list;watch;update;patch @@ -99,7 +99,7 @@ func (r *NnfWorkflowReconciler) Reconcile(ctx context.Context, req ctrl.Request) metrics.NnfWorkflowReconcilesTotal.Inc() // Fetch the Workflow instance - workflow := &dwsv1alpha2.Workflow{} + workflow := &dwsv1alpha3.Workflow{} if err := r.Get(ctx, req.NamespacedName, workflow); err != nil { // ignore not-found errors, since they can't be fixed by an immediate @@ -108,7 +108,7 @@ func (r *NnfWorkflowReconciler) Reconcile(ctx 
context.Context, req ctrl.Request) return ctrl.Result{}, client.IgnoreNotFound(err) } - statusUpdater := updater.NewStatusUpdater[*dwsv1alpha2.WorkflowStatus](workflow) + statusUpdater := updater.NewStatusUpdater[*dwsv1alpha3.WorkflowStatus](workflow) defer func() { err = statusUpdater.CloseWithUpdate(ctx, r, err) }() driverID := os.Getenv("DWS_DRIVER_ID") @@ -141,7 +141,7 @@ func (r *NnfWorkflowReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{RequeueAfter: 1 * time.Second}, nil } - deleteStatus, err := dwsv1alpha2.DeleteChildren(ctx, r.Client, r.ChildObjects, workflow) + deleteStatus, err := dwsv1alpha3.DeleteChildren(ctx, r.Client, r.ChildObjects, workflow) if err != nil { return ctrl.Result{}, err } @@ -188,7 +188,7 @@ func (r *NnfWorkflowReconciler) Reconcile(ctx context.Context, req ctrl.Request) // Create a list of the driverStatus array elements that correspond to the current state // of the workflow and are targeted for the Rabbit driver - driverList := []*dwsv1alpha2.WorkflowDriverStatus{} + driverList := []*dwsv1alpha3.WorkflowDriverStatus{} for i := range workflow.Status.Drivers { driverStatus := &workflow.Status.Drivers[i] @@ -206,14 +206,14 @@ func (r *NnfWorkflowReconciler) Reconcile(ctx context.Context, req ctrl.Request) driverList = append(driverList, driverStatus) } - startFunctions := map[dwsv1alpha2.WorkflowState]func(*NnfWorkflowReconciler, context.Context, *dwsv1alpha2.Workflow, int) (*result, error){ - dwsv1alpha2.StateProposal: (*NnfWorkflowReconciler).startProposalState, - dwsv1alpha2.StateSetup: (*NnfWorkflowReconciler).startSetupState, - dwsv1alpha2.StateDataIn: (*NnfWorkflowReconciler).startDataInOutState, - dwsv1alpha2.StatePreRun: (*NnfWorkflowReconciler).startPreRunState, - dwsv1alpha2.StatePostRun: (*NnfWorkflowReconciler).startPostRunState, - dwsv1alpha2.StateDataOut: (*NnfWorkflowReconciler).startDataInOutState, - dwsv1alpha2.StateTeardown: (*NnfWorkflowReconciler).startTeardownState, + startFunctions 
:= map[dwsv1alpha3.WorkflowState]func(*NnfWorkflowReconciler, context.Context, *dwsv1alpha3.Workflow, int) (*result, error){ + dwsv1alpha3.StateProposal: (*NnfWorkflowReconciler).startProposalState, + dwsv1alpha3.StateSetup: (*NnfWorkflowReconciler).startSetupState, + dwsv1alpha3.StateDataIn: (*NnfWorkflowReconciler).startDataInOutState, + dwsv1alpha3.StatePreRun: (*NnfWorkflowReconciler).startPreRunState, + dwsv1alpha3.StatePostRun: (*NnfWorkflowReconciler).startPostRunState, + dwsv1alpha3.StateDataOut: (*NnfWorkflowReconciler).startDataInOutState, + dwsv1alpha3.StateTeardown: (*NnfWorkflowReconciler).startTeardownState, } // Call the correct "start" function based on workflow state for each directive that has registered for @@ -222,7 +222,7 @@ func (r *NnfWorkflowReconciler) Reconcile(ctx context.Context, req ctrl.Request) log := log.WithValues("state", workflow.Status.State, "index", driverStatus.DWDIndex) log.Info("Start", "directive", workflow.Spec.DWDirectives[driverStatus.DWDIndex]) - driverStatus.Status = dwsv1alpha2.StatusRunning + driverStatus.Status = dwsv1alpha3.StatusRunning driverStatus.Message = "" driverStatus.Error = "" @@ -243,14 +243,14 @@ func (r *NnfWorkflowReconciler) Reconcile(ctx context.Context, req ctrl.Request) log.Info("Start done") } - finishFunctions := map[dwsv1alpha2.WorkflowState]func(*NnfWorkflowReconciler, context.Context, *dwsv1alpha2.Workflow, int) (*result, error){ - dwsv1alpha2.StateProposal: (*NnfWorkflowReconciler).finishProposalState, - dwsv1alpha2.StateSetup: (*NnfWorkflowReconciler).finishSetupState, - dwsv1alpha2.StateDataIn: (*NnfWorkflowReconciler).finishDataInOutState, - dwsv1alpha2.StatePreRun: (*NnfWorkflowReconciler).finishPreRunState, - dwsv1alpha2.StatePostRun: (*NnfWorkflowReconciler).finishPostRunState, - dwsv1alpha2.StateDataOut: (*NnfWorkflowReconciler).finishDataInOutState, - dwsv1alpha2.StateTeardown: (*NnfWorkflowReconciler).finishTeardownState, + finishFunctions := 
map[dwsv1alpha3.WorkflowState]func(*NnfWorkflowReconciler, context.Context, *dwsv1alpha3.Workflow, int) (*result, error){ + dwsv1alpha3.StateProposal: (*NnfWorkflowReconciler).finishProposalState, + dwsv1alpha3.StateSetup: (*NnfWorkflowReconciler).finishSetupState, + dwsv1alpha3.StateDataIn: (*NnfWorkflowReconciler).finishDataInOutState, + dwsv1alpha3.StatePreRun: (*NnfWorkflowReconciler).finishPreRunState, + dwsv1alpha3.StatePostRun: (*NnfWorkflowReconciler).finishPostRunState, + dwsv1alpha3.StateDataOut: (*NnfWorkflowReconciler).finishDataInOutState, + dwsv1alpha3.StateTeardown: (*NnfWorkflowReconciler).finishTeardownState, } // Call the correct "finish" function based on workflow state for each directive that has registered for @@ -260,7 +260,7 @@ func (r *NnfWorkflowReconciler) Reconcile(ctx context.Context, req ctrl.Request) log := log.WithValues("state", workflow.Status.State, "index", driverStatus.DWDIndex) log.Info("Finish", "directive", workflow.Spec.DWDirectives[driverStatus.DWDIndex]) - driverStatus.Status = dwsv1alpha2.StatusRunning + driverStatus.Status = dwsv1alpha3.StatusRunning driverStatus.Message = "" driverStatus.Error = "" @@ -284,7 +284,7 @@ func (r *NnfWorkflowReconciler) Reconcile(ctx context.Context, req ctrl.Request) log.Info("Finish done") ts := metav1.NowMicro() - driverStatus.Status = dwsv1alpha2.StatusCompleted + driverStatus.Status = dwsv1alpha3.StatusCompleted driverStatus.Message = "" driverStatus.Error = "" driverStatus.CompleteTime = &ts @@ -294,12 +294,12 @@ func (r *NnfWorkflowReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, nil } -func (r *NnfWorkflowReconciler) startProposalState(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int) (*result, error) { +func (r *NnfWorkflowReconciler) startProposalState(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int) (*result, error) { log := r.Log.WithValues("Workflow", client.ObjectKeyFromObject(workflow), "Index", index) dwArgs, _ := 
dwdparse.BuildArgsMap(workflow.Spec.DWDirectives[index]) if err := r.validateWorkflow(ctx, workflow); err != nil { - return nil, dwsv1alpha2.NewResourceError("").WithError(err).WithUserMessage("unable to validate DW directives") + return nil, dwsv1alpha3.NewResourceError("").WithError(err).WithUserMessage("unable to validate DW directives") } // only jobdw, persistentdw, and create_persistent need a directive breakdown @@ -324,7 +324,7 @@ func (r *NnfWorkflowReconciler) startProposalState(ctx context.Context, workflow directiveBreakdown, err := r.generateDirectiveBreakdown(ctx, index, workflow, log) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not generate DirectiveBreakdown").WithError(err).WithUserMessage("unable to start parsing DW directive") + return nil, dwsv1alpha3.NewResourceError("could not generate DirectiveBreakdown").WithError(err).WithUserMessage("unable to start parsing DW directive") } if directiveBreakdown == nil { @@ -332,7 +332,7 @@ func (r *NnfWorkflowReconciler) startProposalState(ctx context.Context, workflow } directiveBreakdownReference := v1.ObjectReference{ - Kind: reflect.TypeOf(dwsv1alpha2.DirectiveBreakdown{}).Name(), + Kind: reflect.TypeOf(dwsv1alpha3.DirectiveBreakdown{}).Name(), Name: directiveBreakdown.Name, Namespace: directiveBreakdown.Namespace, } @@ -351,7 +351,7 @@ func (r *NnfWorkflowReconciler) startProposalState(ctx context.Context, workflow return nil, nil } -func (r *NnfWorkflowReconciler) finishProposalState(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int) (*result, error) { +func (r *NnfWorkflowReconciler) finishProposalState(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int) (*result, error) { dwArgs, _ := dwdparse.BuildArgsMap(workflow.Spec.DWDirectives[index]) // only jobdw, persistentdw, and create_persistent have a directive breakdown @@ -362,7 +362,7 @@ func (r *NnfWorkflowReconciler) finishProposalState(ctx context.Context, workflo return nil, nil } - 
directiveBreakdown := &dwsv1alpha2.DirectiveBreakdown{ + directiveBreakdown := &dwsv1alpha3.DirectiveBreakdown{ ObjectMeta: metav1.ObjectMeta{ Name: indexedResourceName(workflow, index), Namespace: workflow.GetNamespace(), @@ -371,7 +371,7 @@ func (r *NnfWorkflowReconciler) finishProposalState(ctx context.Context, workflo err := r.Get(ctx, client.ObjectKeyFromObject(directiveBreakdown), directiveBreakdown) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not get DirectiveBreakdown: %v", client.ObjectKeyFromObject(directiveBreakdown)).WithError(err).WithUserMessage("unable to finish parsing DW directive") + return nil, dwsv1alpha3.NewResourceError("could not get DirectiveBreakdown: %v", client.ObjectKeyFromObject(directiveBreakdown)).WithError(err).WithUserMessage("unable to finish parsing DW directive") } if directiveBreakdown.Status.Error != nil { @@ -388,7 +388,7 @@ func (r *NnfWorkflowReconciler) finishProposalState(ctx context.Context, workflo return nil, nil } -func (r *NnfWorkflowReconciler) startSetupState(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int) (*result, error) { +func (r *NnfWorkflowReconciler) startSetupState(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int) (*result, error) { log := r.Log.WithValues("Workflow", client.ObjectKeyFromObject(workflow), "Index", index) dwArgs, _ := dwdparse.BuildArgsMap(workflow.Spec.DWDirectives[index]) @@ -399,7 +399,7 @@ func (r *NnfWorkflowReconciler) startSetupState(ctx context.Context, workflow *d return nil, r.addPersistentStorageReference(ctx, workflow, index) case "jobdw", "create_persistent": // Chain through the DirectiveBreakdown to the Servers object - dbd := &dwsv1alpha2.DirectiveBreakdown{ + dbd := &dwsv1alpha3.DirectiveBreakdown{ ObjectMeta: metav1.ObjectMeta{ Name: indexedResourceName(workflow, index), Namespace: workflow.Namespace, @@ -407,10 +407,10 @@ func (r *NnfWorkflowReconciler) startSetupState(ctx context.Context, workflow *d } err := r.Get(ctx, 
client.ObjectKeyFromObject(dbd), dbd) if err != nil { - return nil, dwsv1alpha2.NewResourceError("unable to get DirectiveBreakdown: %v", client.ObjectKeyFromObject(dbd)).WithError(err).WithUserMessage("could not read allocation request") + return nil, dwsv1alpha3.NewResourceError("unable to get DirectiveBreakdown: %v", client.ObjectKeyFromObject(dbd)).WithError(err).WithUserMessage("could not read allocation request") } - s := &dwsv1alpha2.Servers{ + s := &dwsv1alpha3.Servers{ ObjectMeta: metav1.ObjectMeta{ Name: dbd.Status.Storage.Reference.Name, Namespace: dbd.Status.Storage.Reference.Namespace, @@ -418,12 +418,12 @@ func (r *NnfWorkflowReconciler) startSetupState(ctx context.Context, workflow *d } err = r.Get(ctx, client.ObjectKeyFromObject(s), s) if err != nil { - return nil, dwsv1alpha2.NewResourceError("unable to get Servers: %v", client.ObjectKeyFromObject(s)).WithError(err).WithUserMessage("could not read allocation request") + return nil, dwsv1alpha3.NewResourceError("unable to get Servers: %v", client.ObjectKeyFromObject(s)).WithError(err).WithUserMessage("could not read allocation request") } if _, present := os.LookupEnv("RABBIT_TEST_ENV_BYPASS_SERVER_STORAGE_CHECK"); !present { if err := r.validateServerAllocations(ctx, dbd, s); err != nil { - return nil, dwsv1alpha2.NewResourceError("invalid Rabbit allocations for servers: %v", client.ObjectKeyFromObject(s)).WithError(err).WithUserMessage("invalid Rabbit allocations") + return nil, dwsv1alpha3.NewResourceError("invalid Rabbit allocations for servers: %v", client.ObjectKeyFromObject(s)).WithError(err).WithUserMessage("invalid Rabbit allocations") } } @@ -432,7 +432,7 @@ func (r *NnfWorkflowReconciler) startSetupState(ctx context.Context, workflow *d return Requeue("conflict").withObject(storage), nil } - return nil, dwsv1alpha2.NewResourceError("could not create NnfStorage").WithError(err).WithUserMessage("could not create allocation") + return nil, dwsv1alpha3.NewResourceError("could not create 
NnfStorage").WithError(err).WithUserMessage("could not create allocation") } case "container": return r.getContainerPorts(ctx, workflow, index) @@ -441,7 +441,7 @@ func (r *NnfWorkflowReconciler) startSetupState(ctx context.Context, workflow *d return nil, nil } -func (r *NnfWorkflowReconciler) finishSetupState(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int) (*result, error) { +func (r *NnfWorkflowReconciler) finishSetupState(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int) (*result, error) { dwArgs, _ := dwdparse.BuildArgsMap(workflow.Spec.DWDirectives[index]) switch dwArgs["command"] { @@ -458,7 +458,7 @@ func (r *NnfWorkflowReconciler) finishSetupState(ctx context.Context, workflow * }, } if err := r.Get(ctx, client.ObjectKeyFromObject(nnfStorage), nnfStorage); err != nil { - return nil, dwsv1alpha2.NewResourceError("could not get NnfStorage: %v", client.ObjectKeyFromObject(nnfStorage)).WithError(err).WithUserMessage("could not allocate storage") + return nil, dwsv1alpha3.NewResourceError("could not get NnfStorage: %v", client.ObjectKeyFromObject(nnfStorage)).WithError(err).WithUserMessage("could not allocate storage") } // If the Status section has not been filled in yet, exit and wait. 
@@ -468,7 +468,7 @@ func (r *NnfWorkflowReconciler) finishSetupState(ctx context.Context, workflow * } if nnfStorage.Status.Error != nil { - handleWorkflowErrorByIndex(dwsv1alpha2.NewResourceError("storage resource error: %v", client.ObjectKeyFromObject(nnfStorage)).WithError(nnfStorage.Status.Error).WithUserMessage("could not allocate storage"), workflow, index) + handleWorkflowErrorByIndex(dwsv1alpha3.NewResourceError("storage resource error: %v", client.ObjectKeyFromObject(nnfStorage)).WithError(nnfStorage.Status.Error).WithUserMessage("could not allocate storage"), workflow, index) return Requeue("error").withObject(nnfStorage), nil } @@ -481,12 +481,12 @@ func (r *NnfWorkflowReconciler) finishSetupState(ctx context.Context, workflow * return nil, nil } -func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int) (*result, error) { +func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int) (*result, error) { log := r.Log.WithValues("Workflow", client.ObjectKeyFromObject(workflow), "Index", index) dwArgs, err := dwdparse.BuildArgsMap(workflow.Spec.DWDirectives[index]) if err != nil { - return nil, dwsv1alpha2.NewResourceError("").WithUserMessage("Invalid DW directive: %v", workflow.Spec.DWDirectives[index]).WithFatal().WithUser() + return nil, dwsv1alpha3.NewResourceError("").WithUserMessage("Invalid DW directive: %v", workflow.Spec.DWDirectives[index]).WithFatal().WithUser() } // NOTE: We don't need to check for the occurrence of a source or destination parameters since these are required fields and validated through the webhook @@ -520,7 +520,7 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo } if parentDwIndex < 0 { - return nil, nil, nil, dwsv1alpha2.NewResourceError("").WithUserMessage("no directive matching '%v' found in workflow", name).WithFatal().WithUser() + return nil, nil, nil, 
dwsv1alpha3.NewResourceError("").WithUserMessage("no directive matching '%v' found in workflow", name).WithFatal().WithUser() } // If directive specifies a persistent storage instance, `name` will be the nnfStorageName @@ -540,7 +540,7 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo } if err := r.Get(ctx, client.ObjectKeyFromObject(storage), storage); err != nil { - return nil, nil, nil, dwsv1alpha2.NewResourceError("could not get NnfStorage %v", client.ObjectKeyFromObject(storage)).WithError(err).WithUserMessage("could not find storage allocation") + return nil, nil, nil, dwsv1alpha3.NewResourceError("could not get NnfStorage %v", client.ObjectKeyFromObject(storage)).WithError(err).WithUserMessage("could not find storage allocation") } storageReference = &corev1.ObjectReference{ @@ -553,25 +553,25 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo // Find the desired workflow teardown state for the NNF Access. This instructs the workflow // when to teardown an NNF Access for the servers - var teardownState dwsv1alpha2.WorkflowState + var teardownState dwsv1alpha3.WorkflowState if dwArgs["command"] == "copy_in" { - teardownState = dwsv1alpha2.StatePreRun + teardownState = dwsv1alpha3.StatePreRun if fsType == "gfs2" || fsType == "lustre" { - teardownState = dwsv1alpha2.StatePostRun + teardownState = dwsv1alpha3.StatePostRun if findCopyOutDirectiveIndexByName(workflow, name) >= 0 { - teardownState = dwsv1alpha2.StateTeardown + teardownState = dwsv1alpha3.StateTeardown } } } else if dwArgs["command"] == "copy_out" { - teardownState = dwsv1alpha2.StateTeardown + teardownState = dwsv1alpha3.StateTeardown } // Setup NNF Access for the NNF Servers so we can run data movement on them. 
access, err := r.setupNnfAccessForServers(ctx, storage, workflow, index, parentDwIndex, teardownState, log) if err != nil { - return storageReference, access, nil, dwsv1alpha2.NewResourceError("").WithError(err).WithUserMessage("could not create data movement mount points") + return storageReference, access, nil, dwsv1alpha3.NewResourceError("").WithError(err).WithUserMessage("could not create data movement mount points") } // Wait for accesses to go ready @@ -591,19 +591,19 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo return storageReference, nil, nil, nil } - return nil, nil, nil, dwsv1alpha2.NewResourceError("").WithUserMessage("Staging parameter '%s' is invalid", param).WithFatal().WithUser() + return nil, nil, nil, dwsv1alpha3.NewResourceError("").WithUserMessage("Staging parameter '%s' is invalid", param).WithFatal().WithUser() } sourceStorage, sourceAccess, result, err := prepareStagingArgumentFn(dwArgs["source"]) if err != nil { - return nil, dwsv1alpha2.NewResourceError("").WithError(err).WithUserMessage("could not prepare data movement resources") + return nil, dwsv1alpha3.NewResourceError("").WithError(err).WithUserMessage("could not prepare data movement resources") } else if result != nil { return result, nil } destStorage, destAccess, result, err := prepareStagingArgumentFn(dwArgs["destination"]) if err != nil { - return nil, dwsv1alpha2.NewResourceError("").WithError(err).WithUserMessage("Could not prepare data movement resources") + return nil, dwsv1alpha3.NewResourceError("").WithError(err).WithUserMessage("Could not prepare data movement resources") } else if result != nil { return result, nil } @@ -612,7 +612,7 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo for _, access := range []*nnfv1alpha3.NnfAccess{sourceAccess, destAccess} { if access != nil { if err := r.Get(ctx, client.ObjectKeyFromObject(access), access); err != nil { - return nil, 
dwsv1alpha2.NewResourceError("could not get NnfAccess %v", client.ObjectKeyFromObject(access)).WithError(err).WithUserMessage("could not create data movement mount points") + return nil, dwsv1alpha3.NewResourceError("could not get NnfAccess %v", client.ObjectKeyFromObject(access)).WithError(err).WithUserMessage("could not create data movement mount points") } if access.Status.State != "mounted" || !access.Status.Ready { @@ -640,7 +640,7 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo Namespace: nnfv1alpha3.DataMovementNamespace, }} if err := r.Get(ctx, client.ObjectKeyFromObject(dmm), dmm); err != nil { - return nil, dwsv1alpha2.NewResourceError("could not get NnfDataMovementManager %v", client.ObjectKeyFromObject(dmm)).WithError(err).WithUserMessage("could not determine data movement readiness") + return nil, dwsv1alpha3.NewResourceError("could not get NnfDataMovementManager %v", client.ObjectKeyFromObject(dmm)).WithError(err).WithUserMessage("could not determine data movement readiness") } if !dmm.Status.Ready { return Requeue("pending data movement readiness").withObject(dmm).after(2 * time.Second), nil @@ -651,7 +651,7 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo // For copy_out, the source is the Rabbit and therefore the target var targetStorageRef *corev1.ObjectReference - if workflow.Spec.DesiredState == dwsv1alpha2.StateDataIn { + if workflow.Spec.DesiredState == dwsv1alpha3.StateDataIn { targetStorageRef = destStorage } else { targetStorageRef = sourceStorage @@ -664,7 +664,7 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo }, } if err := r.Get(ctx, client.ObjectKeyFromObject(targetStorage), targetStorage); err != nil { - return nil, dwsv1alpha2.NewResourceError("could not get NnfStorage: %v", client.ObjectKeyFromObject(targetStorage)).WithError(err).WithUserMessage("could not find storage allocations") + return nil, 
dwsv1alpha3.NewResourceError("could not get NnfStorage: %v", client.ObjectKeyFromObject(targetStorage)).WithError(err).WithUserMessage("could not find storage allocations") } _, source := splitStagingArgumentIntoNameAndPath(dwArgs["source"]) @@ -675,7 +675,7 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo // Get the pinned nnf-dm profile dmProfile, err := findPinnedDMProfile(ctx, r.Client, workflow.GetNamespace(), indexedResourceName(workflow, index)) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not get NnfDataMovementProfile %s", indexedResourceName(workflow, index)).WithError(err).WithUserMessage("could not find data movement profile") + return nil, dwsv1alpha3.NewResourceError("could not get NnfDataMovementProfile %s", indexedResourceName(workflow, index)).WithError(err).WithUserMessage("could not find data movement profile") } dmProfileRef := corev1.ObjectReference{ Kind: reflect.TypeOf(nnfv1alpha3.NnfDataMovementProfile{}).Name(), @@ -689,7 +689,7 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo // XFS & GFS2 require the individual rabbit nodes are performing the data movement. 
if len(targetStorage.Spec.AllocationSets) != 1 { - return nil, dwsv1alpha2.NewResourceError("file system %s has unexpected allocation sets %d", fsType, len(targetStorage.Spec.AllocationSets)).WithUserMessage("unexpected allocation count").WithFatal() + return nil, dwsv1alpha3.NewResourceError("file system %s has unexpected allocation sets %d", fsType, len(targetStorage.Spec.AllocationSets)).WithUserMessage("unexpected allocation count").WithFatal() } nodes := targetStorage.Spec.AllocationSets[0].Nodes @@ -717,8 +717,8 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo }, } - dwsv1alpha2.AddWorkflowLabels(dm, workflow) - dwsv1alpha2.AddOwnerLabels(dm, workflow) + dwsv1alpha3.AddWorkflowLabels(dm, workflow) + dwsv1alpha3.AddOwnerLabels(dm, workflow) nnfv1alpha3.AddDataMovementTeardownStateLabel(dm, workflow.Status.State) nnfv1alpha3.AddDataMovementInitiatorLabel(dm, dwArgs["command"]) addDirectiveIndexLabel(dm, index) @@ -726,7 +726,7 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo log.Info("Creating NNF Data Movement", "name", client.ObjectKeyFromObject(dm).String()) if err := r.Create(ctx, dm); err != nil { if !errors.IsAlreadyExists(err) { - return nil, dwsv1alpha2.NewResourceError("could not create NnfDataMovement: %v", client.ObjectKeyFromObject(dm)).WithError(err).WithUserMessage("could not start data movement") + return nil, dwsv1alpha3.NewResourceError("could not create NnfDataMovement: %v", client.ObjectKeyFromObject(dm)).WithError(err).WithUserMessage("could not start data movement") } } } @@ -755,8 +755,8 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo }, } - dwsv1alpha2.AddWorkflowLabels(dm, workflow) - dwsv1alpha2.AddOwnerLabels(dm, workflow) + dwsv1alpha3.AddWorkflowLabels(dm, workflow) + dwsv1alpha3.AddOwnerLabels(dm, workflow) nnfv1alpha3.AddDataMovementTeardownStateLabel(dm, workflow.Status.State) nnfv1alpha3.AddDataMovementInitiatorLabel(dm, 
dwArgs["command"]) addDirectiveIndexLabel(dm, index) @@ -764,7 +764,7 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo log.Info("Creating NNF Data Movement", "name", client.ObjectKeyFromObject(dm).String()) if err := r.Create(ctx, dm); err != nil { if !errors.IsAlreadyExists(err) { - return nil, dwsv1alpha2.NewResourceError("could not create NnfDataMovement: %v", client.ObjectKeyFromObject(dm)).WithError(err).WithUserMessage("could not start data movement") + return nil, dwsv1alpha3.NewResourceError("could not create NnfDataMovement: %v", client.ObjectKeyFromObject(dm)).WithError(err).WithUserMessage("could not start data movement") } } } @@ -773,17 +773,17 @@ func (r *NnfWorkflowReconciler) startDataInOutState(ctx context.Context, workflo } // Monitor a data movement resource for completion -func (r *NnfWorkflowReconciler) finishDataInOutState(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int) (*result, error) { +func (r *NnfWorkflowReconciler) finishDataInOutState(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int) (*result, error) { // Wait for data movement resources to complete - matchingLabels := dwsv1alpha2.MatchingOwner(workflow) + matchingLabels := dwsv1alpha3.MatchingOwner(workflow) matchingLabels[nnfv1alpha3.DirectiveIndexLabel] = strconv.Itoa(index) matchingLabels[nnfv1alpha3.DataMovementTeardownStateLabel] = string(workflow.Status.State) dataMovementList := &nnfv1alpha3.NnfDataMovementList{} if err := r.List(ctx, dataMovementList, matchingLabels); err != nil { - return nil, dwsv1alpha2.NewResourceError("could not list NnfDataMovements with labels: %v", matchingLabels).WithError(err).WithUserMessage("could not find data movement information") + return nil, dwsv1alpha3.NewResourceError("could not list NnfDataMovements with labels: %v", matchingLabels).WithError(err).WithUserMessage("could not find data movement information") } // Since the Finish state is only called when copy_in / copy_out 
directives are present - the lack of any items @@ -802,7 +802,7 @@ func (r *NnfWorkflowReconciler) finishDataInOutState(ctx context.Context, workfl // TODO: Detailed Fail Message? for _, dm := range dataMovementList.Items { if dm.Status.Status != nnfv1alpha3.DataMovementConditionReasonSuccess { - handleWorkflowErrorByIndex(dwsv1alpha2.NewResourceError("").WithUserMessage( + handleWorkflowErrorByIndex(dwsv1alpha3.NewResourceError("").WithUserMessage( fmt.Sprintf("data movement operation failed during '%s', message: %s", workflow.Status.State, dm.Status.Message)). WithFatal(), workflow, index) return Requeue("error").withObject(&dm), nil @@ -812,7 +812,7 @@ func (r *NnfWorkflowReconciler) finishDataInOutState(ctx context.Context, workfl return nil, nil } -func (r *NnfWorkflowReconciler) startPreRunState(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int) (*result, error) { +func (r *NnfWorkflowReconciler) startPreRunState(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int) (*result, error) { log := r.Log.WithValues("Workflow", client.ObjectKeyFromObject(workflow), "Index", index) dwArgs, _ := dwdparse.BuildArgsMap(workflow.Spec.DWDirectives[index]) @@ -821,7 +821,7 @@ func (r *NnfWorkflowReconciler) startPreRunState(ctx context.Context, workflow * // time. 
unmountResult, err := r.unmountNnfAccessIfNecessary(ctx, workflow, index, "servers") if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not unmount NnfAccess index: %v", index).WithError(err).WithUserMessage("could not unmount on Rabbit nodes") + return nil, dwsv1alpha3.NewResourceError("could not unmount NnfAccess index: %v", index).WithError(err).WithUserMessage("could not unmount on Rabbit nodes") } if unmountResult != nil { @@ -833,7 +833,7 @@ func (r *NnfWorkflowReconciler) startPreRunState(ctx context.Context, workflow * result, err := r.userContainerHandler(ctx, workflow, dwArgs, index, log) if err != nil { - return nil, dwsv1alpha2.NewResourceError("").WithError(err).WithUserMessage("unable to create/update Container Jobs: " + err.Error()) + return nil, dwsv1alpha3.NewResourceError("").WithError(err).WithUserMessage("unable to create/update Container Jobs: " + err.Error()) } if result != nil { // a requeue can be returned, so make sure that happens return result, nil @@ -845,7 +845,7 @@ func (r *NnfWorkflowReconciler) startPreRunState(ctx context.Context, workflow * pinnedName, pinnedNamespace := getStorageReferenceNameFromWorkflowActual(workflow, index) nnfStorageProfile, err := findPinnedProfile(ctx, r.Client, pinnedNamespace, pinnedName) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not find pinned NnfStorageProfile: %v", types.NamespacedName{Name: pinnedName, Namespace: pinnedNamespace}).WithError(err).WithFatal() + return nil, dwsv1alpha3.NewResourceError("could not find pinned NnfStorageProfile: %v", types.NamespacedName{Name: pinnedName, Namespace: pinnedNamespace}).WithError(err).WithFatal() } access := &nnfv1alpha3.NnfAccess{ @@ -858,12 +858,12 @@ func (r *NnfWorkflowReconciler) startPreRunState(ctx context.Context, workflow * // Create an NNFAccess for the compute clients result, err := ctrl.CreateOrUpdate(ctx, r.Client, access, func() error { - dwsv1alpha2.AddWorkflowLabels(access, workflow) - 
dwsv1alpha2.AddOwnerLabels(access, workflow) + dwsv1alpha3.AddWorkflowLabels(access, workflow) + dwsv1alpha3.AddOwnerLabels(access, workflow) addPinnedStorageProfileLabel(access, nnfStorageProfile) addDirectiveIndexLabel(access, index) - access.Spec.TeardownState = dwsv1alpha2.StatePostRun + access.Spec.TeardownState = dwsv1alpha3.StatePostRun access.Spec.DesiredState = "mounted" access.Spec.UserID = workflow.Spec.UserID access.Spec.GroupID = workflow.Spec.GroupID @@ -890,7 +890,7 @@ func (r *NnfWorkflowReconciler) startPreRunState(ctx context.Context, workflow * return ctrl.SetControllerReference(workflow, access, r.Scheme) }) if err != nil { - return nil, dwsv1alpha2.NewResourceError("Could not CreateOrUpdate compute node NnfAccess: %v", client.ObjectKeyFromObject(access)).WithError(err).WithUserMessage("could not mount file system on compute nodes") + return nil, dwsv1alpha3.NewResourceError("Could not CreateOrUpdate compute node NnfAccess: %v", client.ObjectKeyFromObject(access)).WithError(err).WithUserMessage("could not mount file system on compute nodes") } if result == controllerutil.OperationResultCreated { @@ -908,7 +908,7 @@ func (r *NnfWorkflowReconciler) startPreRunState(ctx context.Context, workflow * fsType, err := r.getDirectiveFileSystemType(ctx, workflow, index) if err != nil { - return nil, dwsv1alpha2.NewResourceError("").WithError(err).WithFatal().WithUser().WithUserMessage("Unable to determine directive file system type") + return nil, dwsv1alpha3.NewResourceError("").WithError(err).WithFatal().WithUser().WithUserMessage("Unable to determine directive file system type") } if fsType == "gfs2" || fsType == "lustre" { @@ -924,21 +924,21 @@ func (r *NnfWorkflowReconciler) startPreRunState(ctx context.Context, workflow * // Set the teardown state to post run. 
If there is a copy_out or container directive that // uses this storage instance, set the teardown state so NNF Access is preserved up until // Teardown - teardownState := dwsv1alpha2.StatePostRun + teardownState := dwsv1alpha3.StatePostRun if findCopyOutDirectiveIndexByName(workflow, dwArgs["name"]) >= 0 || findContainerDirectiveIndexByName(workflow, dwArgs["name"]) >= 0 { - teardownState = dwsv1alpha2.StateTeardown + teardownState = dwsv1alpha3.StateTeardown } _, err := r.setupNnfAccessForServers(ctx, storage, workflow, index, index, teardownState, log) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not setup NNF Access in state %s", workflow.Status.State).WithError(err).WithUserMessage("could not mount file system on Rabbit nodes") + return nil, dwsv1alpha3.NewResourceError("could not setup NNF Access in state %s", workflow.Status.State).WithError(err).WithUserMessage("could not mount file system on Rabbit nodes") } } return nil, nil } -func (r *NnfWorkflowReconciler) finishPreRunState(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int) (*result, error) { +func (r *NnfWorkflowReconciler) finishPreRunState(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int) (*result, error) { dwArgs, _ := dwdparse.BuildArgsMap(workflow.Spec.DWDirectives[index]) @@ -957,7 +957,7 @@ func (r *NnfWorkflowReconciler) finishPreRunState(ctx context.Context, workflow case "container": return r.waitForContainersToStart(ctx, workflow, index) default: - return nil, dwsv1alpha2.NewResourceError("unexpected directive: %v", dwArgs["command"]).WithFatal().WithUserMessage("could not mount file system on compute nodes") + return nil, dwsv1alpha3.NewResourceError("unexpected directive: %v", dwArgs["command"]).WithFatal().WithUserMessage("could not mount file system on compute nodes") } workflow.Status.Env[envName] = buildComputeMountPath(workflow, index) @@ -965,7 +965,7 @@ func (r *NnfWorkflowReconciler) finishPreRunState(ctx context.Context, workflow // 
Containers do not have NNFAccesses, so only do this after r.waitForContainersToStart() would have returned result, err := r.waitForNnfAccessStateAndReady(ctx, workflow, index, "mounted") if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not mount rabbit NnfAccess for index %v", index).WithError(err).WithUserMessage("could not mount file system on compute nodes") + return nil, dwsv1alpha3.NewResourceError("could not mount rabbit NnfAccess for index %v", index).WithError(err).WithUserMessage("could not mount file system on compute nodes") } else if result != nil { return result, nil } @@ -973,7 +973,7 @@ func (r *NnfWorkflowReconciler) finishPreRunState(ctx context.Context, workflow return nil, nil } -func (r *NnfWorkflowReconciler) startPostRunState(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int) (*result, error) { +func (r *NnfWorkflowReconciler) startPostRunState(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int) (*result, error) { dwArgs, _ := dwdparse.BuildArgsMap(workflow.Spec.DWDirectives[index]) @@ -985,19 +985,19 @@ func (r *NnfWorkflowReconciler) startPostRunState(ctx context.Context, workflow // in a different job even if there is data movement happening on the Rabbits. 
if result, err := r.unmountNnfAccessIfNecessary(ctx, workflow, index, "computes"); result != nil || err != nil { if err != nil { - return nil, dwsv1alpha2.NewResourceError("").WithError(err).WithUserMessage("could not unmount file system from compute nodes") + return nil, dwsv1alpha3.NewResourceError("").WithError(err).WithUserMessage("could not unmount file system from compute nodes") } return result, nil } // Wait for data movement resources to complete - matchingLabels := dwsv1alpha2.MatchingOwner(workflow) - matchingLabels[nnfv1alpha3.DataMovementTeardownStateLabel] = string(dwsv1alpha2.StatePostRun) + matchingLabels := dwsv1alpha3.MatchingOwner(workflow) + matchingLabels[nnfv1alpha3.DataMovementTeardownStateLabel] = string(dwsv1alpha3.StatePostRun) dataMovementList := &nnfv1alpha3.NnfDataMovementList{} if err := r.List(ctx, dataMovementList, matchingLabels); err != nil { - return nil, dwsv1alpha2.NewResourceError("could not list NnfDataMovements with labels: %v", matchingLabels).WithError(err).WithUserMessage("could not find data movement information") + return nil, dwsv1alpha3.NewResourceError("could not list NnfDataMovements with labels: %v", matchingLabels).WithError(err).WithUserMessage("could not find data movement information") } for _, dm := range dataMovementList.Items { @@ -1009,7 +1009,7 @@ func (r *NnfWorkflowReconciler) startPostRunState(ctx context.Context, workflow // Unmount the NnfAccess for the servers resource if necessary. 
fsType, err := r.getDirectiveFileSystemType(ctx, workflow, index) if err != nil { - return nil, dwsv1alpha2.NewResourceError("").WithError(err).WithFatal().WithUser().WithUserMessage("Unable to determine directive file system type") + return nil, dwsv1alpha3.NewResourceError("").WithError(err).WithFatal().WithUser().WithUserMessage("Unable to determine directive file system type") } if fsType == "gfs2" || fsType == "lustre" { @@ -1021,7 +1021,7 @@ func (r *NnfWorkflowReconciler) startPostRunState(ctx context.Context, workflow return nil, nil } -func (r *NnfWorkflowReconciler) finishPostRunState(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int) (*result, error) { +func (r *NnfWorkflowReconciler) finishPostRunState(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int) (*result, error) { dwArgs, _ := dwdparse.BuildArgsMap(workflow.Spec.DWDirectives[index]) if dwArgs["command"] == "container" { @@ -1030,19 +1030,19 @@ func (r *NnfWorkflowReconciler) finishPostRunState(ctx context.Context, workflow result, err := r.waitForNnfAccessStateAndReady(ctx, workflow, index, "unmounted") if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not unmount compute NnfAccess for index %v", index).WithError(err).WithUserMessage("could not unmount file system on compute nodes") + return nil, dwsv1alpha3.NewResourceError("could not unmount compute NnfAccess for index %v", index).WithError(err).WithUserMessage("could not unmount file system on compute nodes") } else if result != nil { return result, nil } // Any user created copy-offload data movement requests created during run must report any errors to the workflow. 
// TODO: Customer asked if this could be optional - matchingLabels := dwsv1alpha2.MatchingOwner(workflow) - matchingLabels[nnfv1alpha3.DataMovementTeardownStateLabel] = string(dwsv1alpha2.StatePostRun) + matchingLabels := dwsv1alpha3.MatchingOwner(workflow) + matchingLabels[nnfv1alpha3.DataMovementTeardownStateLabel] = string(dwsv1alpha3.StatePostRun) dataMovementList := &nnfv1alpha3.NnfDataMovementList{} if err := r.List(ctx, dataMovementList, matchingLabels); err != nil { - return nil, dwsv1alpha2.NewResourceError("could not list NnfDataMovements with labels: %v", matchingLabels).WithError(err).WithUserMessage("could not find data movement information") + return nil, dwsv1alpha3.NewResourceError("could not list NnfDataMovements with labels: %v", matchingLabels).WithError(err).WithUserMessage("could not find data movement information") } for _, dm := range dataMovementList.Items { @@ -1051,7 +1051,7 @@ func (r *NnfWorkflowReconciler) finishPostRunState(ctx context.Context, workflow } if dm.Status.Status == nnfv1alpha3.DataMovementConditionReasonFailed { - handleWorkflowErrorByIndex(dwsv1alpha2.NewResourceError("data movement %v failed", client.ObjectKeyFromObject(&dm)).WithUserMessage("data movement failed").WithFatal(), workflow, index) + handleWorkflowErrorByIndex(dwsv1alpha3.NewResourceError("data movement %v failed", client.ObjectKeyFromObject(&dm)).WithUserMessage("data movement failed").WithFatal(), workflow, index) return Requeue("error").withObject(&dm), nil } } @@ -1059,7 +1059,7 @@ func (r *NnfWorkflowReconciler) finishPostRunState(ctx context.Context, workflow return nil, nil } -func (r *NnfWorkflowReconciler) startTeardownState(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int) (*result, error) { +func (r *NnfWorkflowReconciler) startTeardownState(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int) (*result, error) { dwArgs, _ := dwdparse.BuildArgsMap(workflow.Spec.DWDirectives[index]) switch dwArgs["command"] { @@ -1072,14 
+1072,14 @@ func (r *NnfWorkflowReconciler) startTeardownState(ctx context.Context, workflow // Delete the NnfDataMovement and NnfAccess for this directive before removing the NnfStorage. // copy_in/out directives can reference NnfStorage from a different directive, so all the NnfAccesses // need to be removed first. - childObjects := []dwsv1alpha2.ObjectList{ + childObjects := []dwsv1alpha3.ObjectList{ &nnfv1alpha3.NnfDataMovementList{}, &nnfv1alpha3.NnfAccessList{}, } - deleteStatus, err := dwsv1alpha2.DeleteChildrenWithLabels(ctx, r.Client, childObjects, workflow, client.MatchingLabels{nnfv1alpha3.DirectiveIndexLabel: strconv.Itoa(index)}) + deleteStatus, err := dwsv1alpha3.DeleteChildrenWithLabels(ctx, r.Client, childObjects, workflow, client.MatchingLabels{nnfv1alpha3.DirectiveIndexLabel: strconv.Itoa(index)}) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not delete NnfDataMovement and NnfAccess children").WithError(err).WithUserMessage("could not stop data movement and unmount file systems") + return nil, dwsv1alpha3.NewResourceError("could not delete NnfDataMovement and NnfAccess children").WithError(err).WithUserMessage("could not stop data movement and unmount file systems") } if !deleteStatus.Complete() { @@ -1090,14 +1090,14 @@ func (r *NnfWorkflowReconciler) startTeardownState(ctx context.Context, workflow return nil, nil } -func (r *NnfWorkflowReconciler) finishTeardownState(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int) (*result, error) { +func (r *NnfWorkflowReconciler) finishTeardownState(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int) (*result, error) { log := r.Log.WithValues("Workflow", client.ObjectKeyFromObject(workflow), "Index", index) dwArgs, _ := dwdparse.BuildArgsMap(workflow.Spec.DWDirectives[index]) switch dwArgs["command"] { case "create_persistent": for _, driverStatus := range workflow.Status.Drivers { - if driverStatus.WatchState == dwsv1alpha2.StateTeardown { + if 
driverStatus.WatchState == dwsv1alpha3.StateTeardown { continue } @@ -1108,29 +1108,29 @@ func (r *NnfWorkflowReconciler) finishTeardownState(ctx context.Context, workflo persistentStorage, err := r.findPersistentInstance(ctx, workflow, dwArgs["name"]) if err != nil { - return nil, dwsv1alpha2.NewResourceError("").WithError(err).WithUserMessage("could not find persistent storage %v", dwArgs["name"]) + return nil, dwsv1alpha3.NewResourceError("").WithError(err).WithUserMessage("could not find persistent storage %v", dwArgs["name"]) } persistentStorage.SetOwnerReferences([]metav1.OwnerReference{}) - dwsv1alpha2.RemoveOwnerLabels(persistentStorage) + dwsv1alpha3.RemoveOwnerLabels(persistentStorage) labels := persistentStorage.GetLabels() delete(labels, nnfv1alpha3.DirectiveIndexLabel) persistentStorage.SetLabels(labels) err = r.Update(ctx, persistentStorage) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not update PersistentStorage: %v", client.ObjectKeyFromObject(persistentStorage)).WithError(err).WithUserMessage("could not finalize peristent storage") + return nil, dwsv1alpha3.NewResourceError("could not update PersistentStorage: %v", client.ObjectKeyFromObject(persistentStorage)).WithError(err).WithUserMessage("could not finalize peristent storage") } log.Info("Removed owner reference from persistent storage", "psi", persistentStorage) persistentStorage, err = r.findPersistentInstance(ctx, workflow, dwArgs["name"]) if err != nil { - return nil, dwsv1alpha2.NewResourceError("").WithError(err).WithUserMessage("could not find persistent storage %v", dwArgs["name"]) + return nil, dwsv1alpha3.NewResourceError("").WithError(err).WithUserMessage("could not find persistent storage %v", dwArgs["name"]) } labels = persistentStorage.GetLabels() if labels != nil { - if ownerUid, exists := labels[dwsv1alpha2.OwnerUidLabel]; exists { + if ownerUid, exists := labels[dwsv1alpha3.OwnerUidLabel]; exists { if types.UID(ownerUid) == workflow.GetUID() { return 
Requeue("persistent storage owner release").after(2 * time.Second).withObject(persistentStorage), nil } @@ -1140,40 +1140,40 @@ func (r *NnfWorkflowReconciler) finishTeardownState(ctx context.Context, workflo persistentStorage, err := r.findPersistentInstance(ctx, workflow, dwArgs["name"]) if err != nil { if !apierrors.IsNotFound(err) { - return nil, dwsv1alpha2.NewResourceError("").WithError(err).WithFatal().WithUser().WithUserMessage("could not find peristent storage %v", dwArgs["name"]) + return nil, dwsv1alpha3.NewResourceError("").WithError(err).WithFatal().WithUser().WithUserMessage("could not find peristent storage %v", dwArgs["name"]) } return nil, nil } if persistentStorage.Spec.UserID != workflow.Spec.UserID { - return nil, dwsv1alpha2.NewResourceError("Existing persistent storage user ID %v does not match user ID %v", persistentStorage.Spec.UserID, workflow.Spec.UserID).WithError(err).WithUserMessage("user ID does not match existing persistent storage").WithFatal().WithUser() + return nil, dwsv1alpha3.NewResourceError("Existing persistent storage user ID %v does not match user ID %v", persistentStorage.Spec.UserID, workflow.Spec.UserID).WithError(err).WithUserMessage("user ID does not match existing persistent storage").WithFatal().WithUser() } if len(persistentStorage.Spec.ConsumerReferences) != 0 { err = fmt.Errorf("PersistentStorage cannot be deleted with %v consumers", len(persistentStorage.Spec.ConsumerReferences)) log.Info(err.Error()) - return nil, dwsv1alpha2.NewResourceError("persistent storage cannot be deleted with %v consumers", len(persistentStorage.Spec.ConsumerReferences)).WithError(err).WithUserMessage("persistent storage cannot be deleted while in use").WithFatal().WithUser() + return nil, dwsv1alpha3.NewResourceError("persistent storage cannot be deleted with %v consumers", len(persistentStorage.Spec.ConsumerReferences)).WithError(err).WithUserMessage("persistent storage cannot be deleted while in use").WithFatal().WithUser() } - 
persistentStorage.Spec.State = dwsv1alpha2.PSIStateDestroying + persistentStorage.Spec.State = dwsv1alpha3.PSIStateDestroying - dwsv1alpha2.AddOwnerLabels(persistentStorage, workflow) + dwsv1alpha3.AddOwnerLabels(persistentStorage, workflow) addDirectiveIndexLabel(persistentStorage, index) if err := controllerutil.SetControllerReference(workflow, persistentStorage, r.Scheme); err != nil { - return nil, dwsv1alpha2.NewResourceError("could not assign workflow as owner of PersistentInstance: %v", client.ObjectKeyFromObject(persistentStorage)).WithError(err).WithUserMessage("could not delete persistent storage %v", dwArgs["name"]) + return nil, dwsv1alpha3.NewResourceError("could not assign workflow as owner of PersistentInstance: %v", client.ObjectKeyFromObject(persistentStorage)).WithError(err).WithUserMessage("could not delete persistent storage %v", dwArgs["name"]) } err = r.Update(ctx, persistentStorage) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not update PersistentInstance: %v", client.ObjectKeyFromObject(persistentStorage)).WithError(err).WithUserMessage("could not delete persistent storage %v", dwArgs["name"]) + return nil, dwsv1alpha3.NewResourceError("could not update PersistentInstance: %v", client.ObjectKeyFromObject(persistentStorage)).WithError(err).WithUserMessage("could not delete persistent storage %v", dwArgs["name"]) } log.Info("Add owner reference for persistent storage for deletion", "psi", persistentStorage) persistentStorage, err = r.findPersistentInstance(ctx, workflow, dwArgs["name"]) if err != nil { - return nil, dwsv1alpha2.NewResourceError("").WithError(err).WithUserMessage("could not find persistent storage %v", dwArgs["name"]) + return nil, dwsv1alpha3.NewResourceError("").WithError(err).WithUserMessage("could not find persistent storage %v", dwArgs["name"]) } labels := persistentStorage.GetLabels() @@ -1181,14 +1181,14 @@ func (r *NnfWorkflowReconciler) finishTeardownState(ctx context.Context, workflo return 
Requeue("persistent storage owner add").after(2 * time.Second).withObject(persistentStorage), nil } - ownerUid, exists := labels[dwsv1alpha2.OwnerUidLabel] + ownerUid, exists := labels[dwsv1alpha3.OwnerUidLabel] if !exists || types.UID(ownerUid) != workflow.GetUID() { return Requeue("persistent storage owner add").after(2 * time.Second).withObject(persistentStorage), nil } case "persistentdw": err := r.removePersistentStorageReference(ctx, workflow, index) if err != nil { - return nil, dwsv1alpha2.NewResourceError("").WithError(err).WithUserMessage("Could not remove persistent storage reference") + return nil, dwsv1alpha3.NewResourceError("").WithError(err).WithUserMessage("Could not remove persistent storage reference") } case "container": // Release container ports @@ -1199,14 +1199,14 @@ func (r *NnfWorkflowReconciler) finishTeardownState(ctx context.Context, workflo default: } - childObjects := []dwsv1alpha2.ObjectList{ + childObjects := []dwsv1alpha3.ObjectList{ &nnfv1alpha3.NnfStorageList{}, - &dwsv1alpha2.PersistentStorageInstanceList{}, + &dwsv1alpha3.PersistentStorageInstanceList{}, } - deleteStatus, err := dwsv1alpha2.DeleteChildrenWithLabels(ctx, r.Client, childObjects, workflow, client.MatchingLabels{nnfv1alpha3.DirectiveIndexLabel: strconv.Itoa(index)}) + deleteStatus, err := dwsv1alpha3.DeleteChildrenWithLabels(ctx, r.Client, childObjects, workflow, client.MatchingLabels{nnfv1alpha3.DirectiveIndexLabel: strconv.Itoa(index)}) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not delete NnfStorage and PersistentStorageInstance children").WithError(err).WithUserMessage("could not delete storage allocations") + return nil, dwsv1alpha3.NewResourceError("could not delete NnfStorage and PersistentStorageInstance children").WithError(err).WithUserMessage("could not delete storage allocations") } if !deleteStatus.Complete() { @@ -1218,22 +1218,22 @@ func (r *NnfWorkflowReconciler) finishTeardownState(ctx context.Context, workflo // 
SetupWithManager sets up the controller with the Manager. func (r *NnfWorkflowReconciler) SetupWithManager(mgr ctrl.Manager) error { - r.ChildObjects = []dwsv1alpha2.ObjectList{ + r.ChildObjects = []dwsv1alpha3.ObjectList{ &nnfv1alpha3.NnfDataMovementList{}, &nnfv1alpha3.NnfAccessList{}, &nnfv1alpha3.NnfStorageList{}, - &dwsv1alpha2.PersistentStorageInstanceList{}, - &dwsv1alpha2.DirectiveBreakdownList{}, + &dwsv1alpha3.PersistentStorageInstanceList{}, + &dwsv1alpha3.DirectiveBreakdownList{}, } maxReconciles := runtime.GOMAXPROCS(0) return ctrl.NewControllerManagedBy(mgr). WithOptions(controller.Options{MaxConcurrentReconciles: maxReconciles}). - For(&dwsv1alpha2.Workflow{}). + For(&dwsv1alpha3.Workflow{}). Owns(&nnfv1alpha3.NnfAccess{}). - Owns(&dwsv1alpha2.DirectiveBreakdown{}). - Owns(&dwsv1alpha2.PersistentStorageInstance{}). - Watches(&nnfv1alpha3.NnfDataMovement{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). - Watches(&nnfv1alpha3.NnfStorage{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). + Owns(&dwsv1alpha3.DirectiveBreakdown{}). + Owns(&dwsv1alpha3.PersistentStorageInstance{}). + Watches(&nnfv1alpha3.NnfDataMovement{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha3.OwnerLabelMapFunc)). + Watches(&nnfv1alpha3.NnfStorage{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha3.OwnerLabelMapFunc)). 
Complete(r) } diff --git a/internal/controller/nnf_workflow_controller_container_helpers.go b/internal/controller/nnf_workflow_controller_container_helpers.go index ec63d961d..7adc55958 100644 --- a/internal/controller/nnf_workflow_controller_container_helpers.go +++ b/internal/controller/nnf_workflow_controller_container_helpers.go @@ -25,7 +25,7 @@ import ( "strconv" "strings" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" "github.com/go-logr/logr" mpicommonv1 "github.com/kubeflow/common/pkg/apis/common/v1" @@ -41,7 +41,7 @@ import ( ) type nnfUserContainer struct { - workflow *dwsv1alpha2.Workflow + workflow *dwsv1alpha3.Workflow profile *nnfv1alpha3.NnfContainerProfile nnfNodes []string volumes []nnfContainerVolume @@ -245,9 +245,9 @@ func (c *nnfUserContainer) createNonMPIJob() error { func (c *nnfUserContainer) applyLabels(job metav1.Object) error { // Apply Job Labels/Owners - dwsv1alpha2.InheritParentLabels(job, c.workflow) - dwsv1alpha2.AddOwnerLabels(job, c.workflow) - dwsv1alpha2.AddWorkflowLabels(job, c.workflow) + dwsv1alpha3.InheritParentLabels(job, c.workflow) + dwsv1alpha3.AddOwnerLabels(job, c.workflow) + dwsv1alpha3.AddWorkflowLabels(job, c.workflow) labels := job.GetLabels() labels[nnfv1alpha3.ContainerLabel] = c.workflow.Name @@ -447,7 +447,7 @@ func (c *nnfUserContainer) getHostPorts() ([]uint16, error) { // Make sure we found the number of ports in the port manager that we expect if len(ports) != expectedPorts { - return nil, dwsv1alpha2.NewResourceError( + return nil, dwsv1alpha3.NewResourceError( "number of ports found in NnfPortManager's allocation (%d) does not equal the profile's requested ports (%d)", len(ports), expectedPorts). 
WithUserMessage("requested ports do not meet the number of allocated ports").WithFatal() diff --git a/internal/controller/nnf_workflow_controller_helpers.go b/internal/controller/nnf_workflow_controller_helpers.go index cbaa96d2b..a194bf635 100644 --- a/internal/controller/nnf_workflow_controller_helpers.go +++ b/internal/controller/nnf_workflow_controller_helpers.go @@ -31,7 +31,7 @@ import ( "strings" "time" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" "github.com/DataWorkflowServices/dws/utils/dwdparse" lusv1beta1 "github.com/NearNodeFlash/lustre-fs-operator/api/v1beta1" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" @@ -55,7 +55,7 @@ type result struct { ctrl.Result reason string object client.Object - deleteStatus *dwsv1alpha2.DeleteStatus + deleteStatus *dwsv1alpha3.DeleteStatus } // When workflow stages cannot advance they return a Requeue result with a particular reason. @@ -78,7 +78,7 @@ func (r *result) withObject(object client.Object) *result { return r } -func (r *result) withDeleteStatus(d dwsv1alpha2.DeleteStatus) *result { +func (r *result) withDeleteStatus(d dwsv1alpha3.DeleteStatus) *result { r.deleteStatus = &d return r } @@ -106,7 +106,7 @@ func (r *result) info() []interface{} { } // Validate the workflow and return any error found -func (r *NnfWorkflowReconciler) validateWorkflow(ctx context.Context, wf *dwsv1alpha2.Workflow) error { +func (r *NnfWorkflowReconciler) validateWorkflow(ctx context.Context, wf *dwsv1alpha3.Workflow) error { var createPersistentCount, deletePersistentCount, directiveCount, containerCount int for index, directive := range wf.Spec.DWDirectives { @@ -120,7 +120,7 @@ func (r *NnfWorkflowReconciler) validateWorkflow(ctx context.Context, wf *dwsv1a case "copy_in", "copy_out": if err := r.validateStagingDirective(ctx, wf, directive); err != nil { - return dwsv1alpha2.NewResourceError("").WithError(err).WithUserMessage("invalid 
staging Directive: '%v'", directive) + return dwsv1alpha3.NewResourceError("").WithError(err).WithUserMessage("invalid staging Directive: '%v'", directive) } case "create_persistent": @@ -131,14 +131,14 @@ func (r *NnfWorkflowReconciler) validateWorkflow(ctx context.Context, wf *dwsv1a case "persistentdw": if err := r.validatePersistentInstanceDirective(ctx, wf, directive); err != nil { - return dwsv1alpha2.NewResourceError("").WithError(err).WithUserMessage("could not validate persistent instance: '%s'", directive) + return dwsv1alpha3.NewResourceError("").WithError(err).WithUserMessage("could not validate persistent instance: '%s'", directive) } case "container": containerCount++ if err := r.validateContainerDirective(ctx, wf, index); err != nil { - return dwsv1alpha2.NewResourceError("").WithError(err).WithUserMessage("could not validate container directive: '%s'", directive) + return dwsv1alpha3.NewResourceError("").WithError(err).WithUserMessage("could not validate container directive: '%s'", directive) } } } @@ -146,12 +146,12 @@ func (r *NnfWorkflowReconciler) validateWorkflow(ctx context.Context, wf *dwsv1a if directiveCount > 1 { // Ensure create_persistent or destroy_persistent are singletons in the workflow if createPersistentCount+deletePersistentCount > 0 { - return dwsv1alpha2.NewResourceError("").WithUserMessage("only a single create_persistent or destroy_persistent directive is allowed per workflow").WithFatal().WithUser() + return dwsv1alpha3.NewResourceError("").WithUserMessage("only a single create_persistent or destroy_persistent directive is allowed per workflow").WithFatal().WithUser() } // Only allow 1 container directive (for now) if containerCount > 1 { - return dwsv1alpha2.NewResourceError("").WithUserMessage("only a single container directive is supported per workflow").WithFatal().WithUser() + return dwsv1alpha3.NewResourceError("").WithUserMessage("only a single container directive is supported per workflow").WithFatal().WithUser() } } 
@@ -159,7 +159,7 @@ func (r *NnfWorkflowReconciler) validateWorkflow(ctx context.Context, wf *dwsv1a } // validateStagingDirective validates the staging copy_in/copy_out directives. -func (r *NnfWorkflowReconciler) validateStagingDirective(ctx context.Context, wf *dwsv1alpha2.Workflow, directive string) error { +func (r *NnfWorkflowReconciler) validateStagingDirective(ctx context.Context, wf *dwsv1alpha3.Workflow, directive string) error { // Validate staging directive of the form... // #DW copy_in source=[SOURCE] destination=[DESTINATION] // #DW copy_out source=[SOURCE] destination=[DESTINATION] @@ -173,32 +173,32 @@ func (r *NnfWorkflowReconciler) validateStagingDirective(ctx context.Context, wf if strings.HasPrefix(arg, "$DW_JOB_") { index := findDirectiveIndexByName(wf, name, "jobdw") if index == -1 { - return dwsv1alpha2.NewResourceError("").WithUserMessage("job storage instance '%s' not found", name).WithFatal().WithUser() + return dwsv1alpha3.NewResourceError("").WithUserMessage("job storage instance '%s' not found", name).WithFatal().WithUser() } args, err := dwdparse.BuildArgsMap(wf.Spec.DWDirectives[index]) if err != nil { - return dwsv1alpha2.NewResourceError("").WithUserMessage("invalid DW directive: '%s'", wf.Spec.DWDirectives[index]).WithFatal() + return dwsv1alpha3.NewResourceError("").WithUserMessage("invalid DW directive: '%s'", wf.Spec.DWDirectives[index]).WithFatal() } fsType, exists := args["type"] if !exists { - return dwsv1alpha2.NewResourceError("").WithUserMessage("invalid DW directive match for staging argument").WithFatal() + return dwsv1alpha3.NewResourceError("").WithUserMessage("invalid DW directive match for staging argument").WithFatal() } if fsType == "raw" { - return dwsv1alpha2.NewResourceError("").WithUserMessage("data movement can not be used with raw allocations").WithFatal().WithUser() + return dwsv1alpha3.NewResourceError("").WithUserMessage("data movement can not be used with raw allocations").WithFatal().WithUser() } } else 
if strings.HasPrefix(arg, "$DW_PERSISTENT_") { if err := r.validatePersistentInstanceForStaging(ctx, name, wf.Namespace); err != nil { - return dwsv1alpha2.NewResourceError("").WithUserMessage("persistent storage instance '%s' not found", name).WithFatal().WithUser() + return dwsv1alpha3.NewResourceError("").WithUserMessage("persistent storage instance '%s' not found", name).WithFatal().WithUser() } if findDirectiveIndexByName(wf, name, "persistentdw") == -1 { - return dwsv1alpha2.NewResourceError("").WithUserMessage("persistentdw directive mentioning '%s' not found", name).WithFatal().WithUser() + return dwsv1alpha3.NewResourceError("").WithUserMessage("persistentdw directive mentioning '%s' not found", name).WithFatal().WithUser() } } else { if r.findLustreFileSystemForPath(ctx, arg, r.Log) == nil { - return dwsv1alpha2.NewResourceError("").WithUserMessage("global Lustre file system containing '%s' not found", arg).WithFatal().WithUser() + return dwsv1alpha3.NewResourceError("").WithUserMessage("global Lustre file system containing '%s' not found", arg).WithFatal().WithUser() } } @@ -207,31 +207,31 @@ func (r *NnfWorkflowReconciler) validateStagingDirective(ctx context.Context, wf args, err := dwdparse.BuildArgsMap(directive) if err != nil { - return dwsv1alpha2.NewResourceError("").WithUserMessage("invalid DW directive: '%s'", directive).WithFatal() + return dwsv1alpha3.NewResourceError("").WithUserMessage("invalid DW directive: '%s'", directive).WithFatal() } if err := validateStagingArgument(args["source"]); err != nil { - return dwsv1alpha2.NewResourceError("Invalid source argument: '%s'", args["source"]).WithError(err) + return dwsv1alpha3.NewResourceError("Invalid source argument: '%s'", args["source"]).WithError(err) } if err := validateStagingArgument(args["destination"]); err != nil { - return dwsv1alpha2.NewResourceError("Invalid destination argument: '%s'", args["destination"]).WithError(err) + return dwsv1alpha3.NewResourceError("Invalid destination 
argument: '%s'", args["destination"]).WithError(err) } return nil } // validateContainerDirective validates the container directive. -func (r *NnfWorkflowReconciler) validateContainerDirective(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int) error { +func (r *NnfWorkflowReconciler) validateContainerDirective(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int) error { args, err := dwdparse.BuildArgsMap(workflow.Spec.DWDirectives[index]) if err != nil { - return dwsv1alpha2.NewResourceError("").WithUserMessage("invalid DW directive: '%s'", workflow.Spec.DWDirectives[index]).WithFatal() + return dwsv1alpha3.NewResourceError("").WithUserMessage("invalid DW directive: '%s'", workflow.Spec.DWDirectives[index]).WithFatal() } // Ensure the supplied profile exists profile, err := findContainerProfile(ctx, r.Client, workflow, index) if err != nil { - return dwsv1alpha2.NewResourceError("").WithError(err).WithUserMessage("no valid container profile found").WithError(err).WithFatal() + return dwsv1alpha3.NewResourceError("").WithError(err).WithUserMessage("no valid container profile found").WithError(err).WithFatal() } // Check to see if the container storage argument is in the list of storages in the container profile @@ -241,7 +241,7 @@ func (r *NnfWorkflowReconciler) validateContainerDirective(ctx context.Context, return nil } } - return dwsv1alpha2.NewResourceError("").WithUserMessage("storage '%s' not found in container profile '%s'", storageName, profile.Name).WithFatal().WithUser() + return dwsv1alpha3.NewResourceError("").WithUserMessage("storage '%s' not found in container profile '%s'", storageName, profile.Name).WithFatal().WithUser() } checkContainerFs := func(idx int) error { @@ -267,7 +267,7 @@ func (r *NnfWorkflowReconciler) validateContainerDirective(ctx context.Context, } if strings.ToLower(t) != "lustre" && strings.ToLower(t) != "gfs2" { - return dwsv1alpha2.NewResourceError("").WithUserMessage("unsupported container filesystem: 
%s", t).WithFatal().WithUser() + return dwsv1alpha3.NewResourceError("").WithUserMessage("unsupported container filesystem: %s", t).WithFatal().WithUser() } return nil @@ -284,7 +284,7 @@ func (r *NnfWorkflowReconciler) validateContainerDirective(ctx context.Context, if strings.HasPrefix(arg, "DW_JOB_") { idx := findDirectiveIndexByName(workflow, storageName, "jobdw") if idx == -1 { - return dwsv1alpha2.NewResourceError("").WithUserMessage("jobdw directive mentioning '%s' not found", storageName).WithFatal().WithUser() + return dwsv1alpha3.NewResourceError("").WithUserMessage("jobdw directive mentioning '%s' not found", storageName).WithFatal().WithUser() } if err := checkContainerFs(idx); err != nil { return err @@ -295,11 +295,11 @@ func (r *NnfWorkflowReconciler) validateContainerDirective(ctx context.Context, suppliedStorageArguments = append(suppliedStorageArguments, arg) } else if strings.HasPrefix(arg, "DW_PERSISTENT_") { if err := r.validatePersistentInstance(ctx, storageName, workflow.Namespace); err != nil { - return dwsv1alpha2.NewResourceError("").WithError(err).WithUserMessage("persistent storage instance '%s' not found", storageName).WithFatal() + return dwsv1alpha3.NewResourceError("").WithError(err).WithUserMessage("persistent storage instance '%s' not found", storageName).WithFatal() } idx := findDirectiveIndexByName(workflow, storageName, "persistentdw") if idx == -1 { - return dwsv1alpha2.NewResourceError("").WithUserMessage("persistentdw directive mentioning '%s' not found", storageName).WithFatal().WithUser() + return dwsv1alpha3.NewResourceError("").WithUserMessage("persistentdw directive mentioning '%s' not found", storageName).WithFatal().WithUser() } if err := checkContainerFs(idx); err != nil { return err @@ -311,14 +311,14 @@ func (r *NnfWorkflowReconciler) validateContainerDirective(ctx context.Context, } else if strings.HasPrefix(arg, "DW_GLOBAL_") { // Look up the global lustre fs by path rather than LustreFilesystem name if 
globalLustre := r.findLustreFileSystemForPath(ctx, storageName, r.Log); globalLustre == nil { - return dwsv1alpha2.NewResourceError("").WithUserMessage("global Lustre file system containing '%s' not found", storageName).WithFatal().WithUser() + return dwsv1alpha3.NewResourceError("").WithUserMessage("global Lustre file system containing '%s' not found", storageName).WithFatal().WithUser() } if err := checkStorageIsInProfile(arg); err != nil { - return dwsv1alpha2.NewResourceError("").WithError(err).WithUserMessage("storage '%s' is not present in the container profile", arg).WithUser().WithFatal() + return dwsv1alpha3.NewResourceError("").WithError(err).WithUserMessage("storage '%s' is not present in the container profile", arg).WithUser().WithFatal() } suppliedStorageArguments = append(suppliedStorageArguments, arg) } else { - return dwsv1alpha2.NewResourceError("").WithUserMessage("unrecognized container argument: %s", arg).WithFatal().WithUser() + return dwsv1alpha3.NewResourceError("").WithUserMessage("unrecognized container argument: %s", arg).WithFatal().WithUser() } } } @@ -337,7 +337,7 @@ func (r *NnfWorkflowReconciler) validateContainerDirective(ctx context.Context, for _, storage := range profile.Data.Storages { if !storage.Optional { if !findInStorageArguments(storage.Name) { - return dwsv1alpha2.NewResourceError("").WithUserMessage("storage '%s' in container profile '%s' is not optional: storage argument not found in the supplied arguments", + return dwsv1alpha3.NewResourceError("").WithUserMessage("storage '%s' in container profile '%s' is not optional: storage argument not found in the supplied arguments", storage.Name, profile.Name).WithUser().WithFatal() } } @@ -357,15 +357,15 @@ func (r *NnfWorkflowReconciler) validateContainerDirective(ctx context.Context, func (r *NnfWorkflowReconciler) validatePersistentInstanceForStaging(ctx context.Context, name string, namespace string) error { psi, err := r.getPersistentStorageInstance(ctx, name, namespace) 
if err != nil { - return dwsv1alpha2.NewResourceError("").WithError(err).WithUserMessage("could not get PersistentStorageInstance '%s'", name).WithFatal().WithUser() + return dwsv1alpha3.NewResourceError("").WithError(err).WithUserMessage("could not get PersistentStorageInstance '%s'", name).WithFatal().WithUser() } if psi.Spec.FsType == "raw" { - return dwsv1alpha2.NewResourceError("").WithUserMessage("data movement can not be used with raw allocations").WithFatal().WithUser() + return dwsv1alpha3.NewResourceError("").WithUserMessage("data movement can not be used with raw allocations").WithFatal().WithUser() } if !psi.DeletionTimestamp.IsZero() { - return dwsv1alpha2.NewResourceError("").WithUserMessage("Persistent storage instance '%s' is deleting", name).WithUser().WithFatal() + return dwsv1alpha3.NewResourceError("").WithUserMessage("Persistent storage instance '%s' is deleting", name).WithUser().WithFatal() } return nil @@ -375,39 +375,39 @@ func (r *NnfWorkflowReconciler) validatePersistentInstanceForStaging(ctx context func (r *NnfWorkflowReconciler) validatePersistentInstance(ctx context.Context, name string, namespace string) error { psi, err := r.getPersistentStorageInstance(ctx, name, namespace) if err != nil { - return dwsv1alpha2.NewResourceError("").WithError(err).WithUserMessage("could not get PersistentStorageInstance %s", name).WithFatal().WithUser() + return dwsv1alpha3.NewResourceError("").WithError(err).WithUserMessage("could not get PersistentStorageInstance %s", name).WithFatal().WithUser() } if !psi.DeletionTimestamp.IsZero() { - return dwsv1alpha2.NewResourceError("").WithUserMessage("Persistent storage instance '%s' is deleting", name).WithUser().WithFatal() + return dwsv1alpha3.NewResourceError("").WithUserMessage("Persistent storage instance '%s' is deleting", name).WithUser().WithFatal() } return nil } // validatePersistentInstance validates the persistentdw directive. 
-func (r *NnfWorkflowReconciler) validatePersistentInstanceDirective(ctx context.Context, wf *dwsv1alpha2.Workflow, directive string) error { +func (r *NnfWorkflowReconciler) validatePersistentInstanceDirective(ctx context.Context, wf *dwsv1alpha3.Workflow, directive string) error { // Validate that the persistent instance is available and not in the process of being deleted args, err := dwdparse.BuildArgsMap(directive) if err != nil { - return dwsv1alpha2.NewResourceError("invalid DW directive: %s", directive).WithFatal() + return dwsv1alpha3.NewResourceError("invalid DW directive: %s", directive).WithFatal() } psi, err := r.getPersistentStorageInstance(ctx, args["name"], wf.Namespace) if err != nil { - return dwsv1alpha2.NewResourceError("").WithError(err).WithUserMessage("could not get PersistentStorageInstance '%s'", args["name"]).WithFatal().WithUser() + return dwsv1alpha3.NewResourceError("").WithError(err).WithUserMessage("could not get PersistentStorageInstance '%s'", args["name"]).WithFatal().WithUser() } if !psi.DeletionTimestamp.IsZero() { - return dwsv1alpha2.NewResourceError("").WithUserMessage("Persistent storage instance '%s' is deleting", args["name"]).WithUser().WithFatal() + return dwsv1alpha3.NewResourceError("").WithUserMessage("Persistent storage instance '%s' is deleting", args["name"]).WithUser().WithFatal() } return nil } // Retrieve the persistent storage instance with the specified name -func (r *NnfWorkflowReconciler) getPersistentStorageInstance(ctx context.Context, name string, namespace string) (*dwsv1alpha2.PersistentStorageInstance, error) { - psi := &dwsv1alpha2.PersistentStorageInstance{ +func (r *NnfWorkflowReconciler) getPersistentStorageInstance(ctx context.Context, name string, namespace string) (*dwsv1alpha3.PersistentStorageInstance, error) { + psi := &dwsv1alpha3.PersistentStorageInstance{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, @@ -420,7 +420,7 @@ func (r *NnfWorkflowReconciler) 
getPersistentStorageInstance(ctx context.Context // generateDirectiveBreakdown creates a DirectiveBreakdown for any #DW directive that needs to specify storage // or compute node information to the WLM (jobdw, create_persistent, persistentdw) -func (r *NnfWorkflowReconciler) generateDirectiveBreakdown(ctx context.Context, dwIndex int, workflow *dwsv1alpha2.Workflow, log logr.Logger) (*dwsv1alpha2.DirectiveBreakdown, error) { +func (r *NnfWorkflowReconciler) generateDirectiveBreakdown(ctx context.Context, dwIndex int, workflow *dwsv1alpha3.Workflow, log logr.Logger) (*dwsv1alpha3.DirectiveBreakdown, error) { // DWDirectives that we need to generate directiveBreakdowns for look like this: // #DW command arguments... @@ -455,7 +455,7 @@ func (r *NnfWorkflowReconciler) generateDirectiveBreakdown(ctx context.Context, // We care about the commands that generate a breakdown if breakThisDown == dwArgs["command"] { dwdName := indexedResourceName(workflow, dwIndex) - directiveBreakdown := &dwsv1alpha2.DirectiveBreakdown{ + directiveBreakdown := &dwsv1alpha3.DirectiveBreakdown{ ObjectMeta: metav1.ObjectMeta{ Name: dwdName, Namespace: workflow.Namespace, @@ -465,8 +465,8 @@ func (r *NnfWorkflowReconciler) generateDirectiveBreakdown(ctx context.Context, result, err := ctrl.CreateOrUpdate(ctx, r.Client, directiveBreakdown, // Mutate function to fill in a directiveBreakdown func() error { - dwsv1alpha2.AddWorkflowLabels(directiveBreakdown, workflow) - dwsv1alpha2.AddOwnerLabels(directiveBreakdown, workflow) + dwsv1alpha3.AddWorkflowLabels(directiveBreakdown, workflow) + dwsv1alpha3.AddOwnerLabels(directiveBreakdown, workflow) addDirectiveIndexLabel(directiveBreakdown, dwIndex) directiveBreakdown.Spec.Directive = directive @@ -477,7 +477,7 @@ func (r *NnfWorkflowReconciler) generateDirectiveBreakdown(ctx context.Context, }) if err != nil { - return nil, dwsv1alpha2.NewResourceError("CreateOrUpdate failed for DirectiveBreakdown: %v", 
client.ObjectKeyFromObject(directiveBreakdown)).WithError(err) + return nil, dwsv1alpha3.NewResourceError("CreateOrUpdate failed for DirectiveBreakdown: %v", client.ObjectKeyFromObject(directiveBreakdown)).WithError(err) } if result == controllerutil.OperationResultCreated { @@ -496,9 +496,9 @@ func (r *NnfWorkflowReconciler) generateDirectiveBreakdown(ctx context.Context, return nil, nil } -func (r *NnfWorkflowReconciler) validateServerAllocations(ctx context.Context, dbd *dwsv1alpha2.DirectiveBreakdown, servers *dwsv1alpha2.Servers) error { +func (r *NnfWorkflowReconciler) validateServerAllocations(ctx context.Context, dbd *dwsv1alpha3.DirectiveBreakdown, servers *dwsv1alpha3.Servers) error { if len(dbd.Status.Storage.AllocationSets) != 0 && len(dbd.Status.Storage.AllocationSets) != len(servers.Spec.AllocationSets) { - return dwsv1alpha2.NewResourceError("Servers resource does not meet storage requirements for directive '%s'", dbd.Spec.Directive).WithUserMessage("Allocation request does not meet directive requirements").WithWLM().WithFatal() + return dwsv1alpha3.NewResourceError("Servers resource does not meet storage requirements for directive '%s'", dbd.Spec.Directive).WithUserMessage("Allocation request does not meet directive requirements").WithWLM().WithFatal() } for _, breakdownAllocationSet := range dbd.Status.Storage.AllocationSets { @@ -510,15 +510,15 @@ func (r *NnfWorkflowReconciler) validateServerAllocations(ctx context.Context, d found = true - if breakdownAllocationSet.AllocationStrategy == dwsv1alpha2.AllocateSingleServer { + if breakdownAllocationSet.AllocationStrategy == dwsv1alpha3.AllocateSingleServer { if len(serverAllocationSet.Storage) != 1 || serverAllocationSet.Storage[0].AllocationCount != 1 { - return dwsv1alpha2.NewResourceError("allocation set %s expected single allocation", breakdownAllocationSet.Label).WithUserMessage("storage directive requirements were not satisfied").WithWLM().WithFatal() + return 
dwsv1alpha3.NewResourceError("allocation set %s expected single allocation", breakdownAllocationSet.Label).WithUserMessage("storage directive requirements were not satisfied").WithWLM().WithFatal() } } var totalCapacity int64 = 0 - if breakdownAllocationSet.AllocationStrategy == dwsv1alpha2.AllocateAcrossServers { + if breakdownAllocationSet.AllocationStrategy == dwsv1alpha3.AllocateAcrossServers { for _, serverAllocation := range serverAllocationSet.Storage { totalCapacity += serverAllocationSet.AllocationSize * int64(serverAllocation.AllocationCount) } @@ -527,12 +527,12 @@ func (r *NnfWorkflowReconciler) validateServerAllocations(ctx context.Context, d } if totalCapacity < breakdownAllocationSet.MinimumCapacity { - return dwsv1alpha2.NewResourceError("allocation set %s specified insufficient capacity", breakdownAllocationSet.Label).WithUserMessage("storage directive requirements were not satisfied").WithWLM().WithFatal() + return dwsv1alpha3.NewResourceError("allocation set %s specified insufficient capacity", breakdownAllocationSet.Label).WithUserMessage("storage directive requirements were not satisfied").WithWLM().WithFatal() } // Look up each of the storages specified to make sure they exist for _, serverAllocation := range serverAllocationSet.Storage { - storage := &dwsv1alpha2.Storage{ + storage := &dwsv1alpha3.Storage{ ObjectMeta: metav1.ObjectMeta{ Name: serverAllocation.Name, Namespace: corev1.NamespaceDefault, @@ -540,13 +540,13 @@ func (r *NnfWorkflowReconciler) validateServerAllocations(ctx context.Context, d } if err := r.Get(ctx, client.ObjectKeyFromObject(storage), storage); err != nil { - return dwsv1alpha2.NewResourceError("could not get storage: %s", client.ObjectKeyFromObject(storage)).WithError(err).WithUserMessage("storage directive requirements were not satisfied").WithFatal() + return dwsv1alpha3.NewResourceError("could not get storage: %s", client.ObjectKeyFromObject(storage)).WithError(err).WithUserMessage("storage directive requirements 
were not satisfied").WithFatal() } } } if !found { - return dwsv1alpha2.NewResourceError("allocation set %s not found in Servers resource", breakdownAllocationSet.Label).WithUserMessage("storage directive requirements were not satisfied").WithWLM().WithFatal() + return dwsv1alpha3.NewResourceError("allocation set %s not found in Servers resource", breakdownAllocationSet.Label).WithUserMessage("storage directive requirements were not satisfied").WithWLM().WithFatal() } } @@ -554,7 +554,7 @@ func (r *NnfWorkflowReconciler) validateServerAllocations(ctx context.Context, d } -func (r *NnfWorkflowReconciler) createNnfStorage(ctx context.Context, workflow *dwsv1alpha2.Workflow, s *dwsv1alpha2.Servers, index int, log logr.Logger) (*nnfv1alpha3.NnfStorage, error) { +func (r *NnfWorkflowReconciler) createNnfStorage(ctx context.Context, workflow *dwsv1alpha3.Workflow, s *dwsv1alpha3.Servers, index int, log logr.Logger) (*nnfv1alpha3.NnfStorage, error) { nnfStorage := &nnfv1alpha3.NnfStorage{ ObjectMeta: metav1.ObjectMeta{ Name: s.Name, @@ -564,20 +564,20 @@ func (r *NnfWorkflowReconciler) createNnfStorage(ctx context.Context, workflow * dwArgs, err := dwdparse.BuildArgsMap(workflow.Spec.DWDirectives[index]) if err != nil { - return nil, dwsv1alpha2.NewResourceError("").WithUserMessage("invalid DW directive: %s", workflow.Spec.DWDirectives[index]).WithFatal().WithUser() + return nil, dwsv1alpha3.NewResourceError("").WithUserMessage("invalid DW directive: %s", workflow.Spec.DWDirectives[index]).WithFatal().WithUser() } pinnedName, pinnedNamespace := getStorageReferenceNameFromWorkflowActual(workflow, index) nnfStorageProfile, err := findPinnedProfile(ctx, r.Client, pinnedNamespace, pinnedName) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not find pinned NnfStorageProfile: %v", types.NamespacedName{Name: pinnedName, Namespace: pinnedNamespace}).WithError(err).WithFatal() + return nil, dwsv1alpha3.NewResourceError("could not find pinned NnfStorageProfile: 
%v", types.NamespacedName{Name: pinnedName, Namespace: pinnedNamespace}).WithError(err).WithFatal() } var owner metav1.Object = workflow if dwArgs["command"] == "create_persistent" { psi, err := r.findPersistentInstance(ctx, workflow, dwArgs["name"]) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not find PersistentStorageInstance: %v", dwArgs["name"]).WithError(err).WithFatal() + return nil, dwsv1alpha3.NewResourceError("could not find PersistentStorageInstance: %v", dwArgs["name"]).WithError(err).WithFatal() } owner = psi @@ -585,8 +585,8 @@ func (r *NnfWorkflowReconciler) createNnfStorage(ctx context.Context, workflow * result, err := ctrl.CreateOrUpdate(ctx, r.Client, nnfStorage, func() error { - dwsv1alpha2.AddWorkflowLabels(nnfStorage, workflow) - dwsv1alpha2.AddOwnerLabels(nnfStorage, owner) + dwsv1alpha3.AddWorkflowLabels(nnfStorage, workflow) + dwsv1alpha3.AddOwnerLabels(nnfStorage, owner) addDirectiveIndexLabel(nnfStorage, index) addPinnedStorageProfileLabel(nnfStorage, nnfStorageProfile) @@ -687,7 +687,7 @@ func (r *NnfWorkflowReconciler) createNnfStorage(ctx context.Context, workflow * }) if err != nil { - return nil, dwsv1alpha2.NewResourceError("CreateOrUpdate failed for NnfStorage: %v", client.ObjectKeyFromObject(nnfStorage)).WithError(err) + return nil, dwsv1alpha3.NewResourceError("CreateOrUpdate failed for NnfStorage: %v", client.ObjectKeyFromObject(nnfStorage)).WithError(err) } if result == controllerutil.OperationResultCreated { @@ -702,14 +702,14 @@ func (r *NnfWorkflowReconciler) createNnfStorage(ctx context.Context, workflow * } func (r *NnfWorkflowReconciler) getLustreMgsFromPool(ctx context.Context, pool string) (corev1.ObjectReference, string, error) { - persistentStorageList := &dwsv1alpha2.PersistentStorageInstanceList{} + persistentStorageList := &dwsv1alpha3.PersistentStorageInstanceList{} if err := r.List(ctx, persistentStorageList, client.MatchingLabels(map[string]string{nnfv1alpha3.StandaloneMGTLabel: pool})); err 
!= nil { return corev1.ObjectReference{}, "", err } // Choose an MGS at random from the list of persistent storages if len(persistentStorageList.Items) == 0 { - return corev1.ObjectReference{}, "", dwsv1alpha2.NewResourceError("").WithUserMessage("no MGSs found for pool: %s", pool).WithFatal().WithUser() + return corev1.ObjectReference{}, "", dwsv1alpha3.NewResourceError("").WithUserMessage("no MGSs found for pool: %s", pool).WithFatal().WithUser() } healthyMgts := make(map[string]corev1.ObjectReference) @@ -723,28 +723,28 @@ func (r *NnfWorkflowReconciler) getLustreMgsFromPool(ctx context.Context, pool s } if err := r.Get(ctx, client.ObjectKeyFromObject(nnfStorage), nnfStorage); err != nil { - return corev1.ObjectReference{}, "", dwsv1alpha2.NewResourceError("could not get persistent NnfStorage %v for MGS", client.ObjectKeyFromObject(nnfStorage)).WithError(err) + return corev1.ObjectReference{}, "", dwsv1alpha3.NewResourceError("could not get persistent NnfStorage %v for MGS", client.ObjectKeyFromObject(nnfStorage)).WithError(err) } // Do some sanity checks on the NnfStorage to make sure it's really a standalone MGT if nnfStorage.Spec.FileSystemType != "lustre" { - return corev1.ObjectReference{}, "", dwsv1alpha2.NewResourceError("invalid file systems type '%s' for persistent MGS", nnfStorage.Spec.FileSystemType).WithFatal() + return corev1.ObjectReference{}, "", dwsv1alpha3.NewResourceError("invalid file systems type '%s' for persistent MGS", nnfStorage.Spec.FileSystemType).WithFatal() } if len(nnfStorage.Spec.AllocationSets) != 1 { - return corev1.ObjectReference{}, "", dwsv1alpha2.NewResourceError("unexpected number of allocation sets '%d' for persistent MGS", len(nnfStorage.Spec.AllocationSets)).WithFatal() + return corev1.ObjectReference{}, "", dwsv1alpha3.NewResourceError("unexpected number of allocation sets '%d' for persistent MGS", len(nnfStorage.Spec.AllocationSets)).WithFatal() } if len(nnfStorage.Spec.AllocationSets[0].Nodes) != 1 { - return 
corev1.ObjectReference{}, "", dwsv1alpha2.NewResourceError("unexpected number of nodes '%d' in allocation set for persistent MGS", len(nnfStorage.Spec.AllocationSets[0].Nodes)).WithFatal() + return corev1.ObjectReference{}, "", dwsv1alpha3.NewResourceError("unexpected number of nodes '%d' in allocation set for persistent MGS", len(nnfStorage.Spec.AllocationSets[0].Nodes)).WithFatal() } if nnfStorage.Spec.AllocationSets[0].Nodes[0].Count != 1 { - return corev1.ObjectReference{}, "", dwsv1alpha2.NewResourceError("unexpected number of count '%d' in node list for persistent MGS", nnfStorage.Spec.AllocationSets[0].Nodes[0].Count).WithFatal() + return corev1.ObjectReference{}, "", dwsv1alpha3.NewResourceError("unexpected number of count '%d' in node list for persistent MGS", nnfStorage.Spec.AllocationSets[0].Nodes[0].Count).WithFatal() } if len(nnfStorage.Status.MgsAddress) == 0 { - return corev1.ObjectReference{}, "", dwsv1alpha2.NewResourceError("no LNid listed for persistent MGS").WithFatal() + return corev1.ObjectReference{}, "", dwsv1alpha3.NewResourceError("no LNid listed for persistent MGS").WithFatal() } // If the MGT isn't ready, then don't use it @@ -753,7 +753,7 @@ func (r *NnfWorkflowReconciler) getLustreMgsFromPool(ctx context.Context, pool s } // Find the DWS Storage resource for the Rabbit that the MGT is on - storage := &dwsv1alpha2.Storage{ + storage := &dwsv1alpha3.Storage{ ObjectMeta: metav1.ObjectMeta{ Name: nnfStorage.Spec.AllocationSets[0].Nodes[0].Name, Namespace: corev1.NamespaceDefault, @@ -761,17 +761,17 @@ func (r *NnfWorkflowReconciler) getLustreMgsFromPool(ctx context.Context, pool s } if err := r.Get(ctx, client.ObjectKeyFromObject(storage), storage); err != nil { - return corev1.ObjectReference{}, "", dwsv1alpha2.NewResourceError("could not get storage resource %v", client.ObjectKeyFromObject(storage)).WithError(err) + return corev1.ObjectReference{}, "", dwsv1alpha3.NewResourceError("could not get storage resource %v", 
client.ObjectKeyFromObject(storage)).WithError(err) } // If the Storage resource says the Rabbit isn't ready, then don't use it - if storage.Status.Status != dwsv1alpha2.ReadyStatus { + if storage.Status.Status != dwsv1alpha3.ReadyStatus { continue } // The MGT is healthy from what we can tell, so add it to the map of healthy MGTs healthyMgts[nnfStorage.Status.MgsAddress] = corev1.ObjectReference{ - Kind: reflect.TypeOf(dwsv1alpha2.PersistentStorageInstance{}).Name(), + Kind: reflect.TypeOf(dwsv1alpha3.PersistentStorageInstance{}).Name(), Name: persistentStorage.Name, Namespace: persistentStorage.Namespace, } @@ -779,7 +779,7 @@ func (r *NnfWorkflowReconciler) getLustreMgsFromPool(ctx context.Context, pool s // Check to make sure there's at least one MGT we can use if len(healthyMgts) == 0 { - return corev1.ObjectReference{}, "", dwsv1alpha2.NewResourceError("").WithUserMessage("no healthy MGSs found for pool: %s", pool).WithMajor() + return corev1.ObjectReference{}, "", dwsv1alpha3.NewResourceError("").WithUserMessage("no healthy MGSs found for pool: %s", pool).WithMajor() } // Choose an MGT at random from the map @@ -791,7 +791,7 @@ func (r *NnfWorkflowReconciler) getLustreMgsFromPool(ctx context.Context, pool s i-- } - return corev1.ObjectReference{}, "", dwsv1alpha2.NewResourceError("no MGS successfully picked. Map length %d", len(healthyMgts)).WithFatal() + return corev1.ObjectReference{}, "", dwsv1alpha3.NewResourceError("no MGS successfully picked. 
Map length %d", len(healthyMgts)).WithFatal() } func (r *NnfWorkflowReconciler) findLustreFileSystemForPath(ctx context.Context, path string, log logr.Logger) *lusv1beta1.LustreFileSystem { @@ -810,11 +810,11 @@ func (r *NnfWorkflowReconciler) findLustreFileSystemForPath(ctx context.Context, return nil } -func (r *NnfWorkflowReconciler) setupNnfAccessForServers(ctx context.Context, storage *nnfv1alpha3.NnfStorage, workflow *dwsv1alpha2.Workflow, index int, parentDwIndex int, teardownState dwsv1alpha2.WorkflowState, log logr.Logger) (*nnfv1alpha3.NnfAccess, error) { +func (r *NnfWorkflowReconciler) setupNnfAccessForServers(ctx context.Context, storage *nnfv1alpha3.NnfStorage, workflow *dwsv1alpha3.Workflow, index int, parentDwIndex int, teardownState dwsv1alpha3.WorkflowState, log logr.Logger) (*nnfv1alpha3.NnfAccess, error) { pinnedName, pinnedNamespace := getStorageReferenceNameFromWorkflowActual(workflow, parentDwIndex) nnfStorageProfile, err := findPinnedProfile(ctx, r.Client, pinnedNamespace, pinnedName) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not find pinned NnfStorageProfile: %v", types.NamespacedName{Name: pinnedName, Namespace: pinnedNamespace}).WithError(err).WithFatal() + return nil, dwsv1alpha3.NewResourceError("could not find pinned NnfStorageProfile: %v", types.NamespacedName{Name: pinnedName, Namespace: pinnedNamespace}).WithError(err).WithFatal() } access := &nnfv1alpha3.NnfAccess{ @@ -826,8 +826,8 @@ func (r *NnfWorkflowReconciler) setupNnfAccessForServers(ctx context.Context, st result, err := ctrl.CreateOrUpdate(ctx, r.Client, access, func() error { - dwsv1alpha2.AddWorkflowLabels(access, workflow) - dwsv1alpha2.AddOwnerLabels(access, workflow) + dwsv1alpha3.AddWorkflowLabels(access, workflow) + dwsv1alpha3.AddOwnerLabels(access, workflow) addPinnedStorageProfileLabel(access, nnfStorageProfile) addDirectiveIndexLabel(access, index) nnfv1alpha3.AddDataMovementTeardownStateLabel(access, teardownState) @@ -854,7 +854,7 @@ 
func (r *NnfWorkflowReconciler) setupNnfAccessForServers(ctx context.Context, st }) if err != nil { - return nil, dwsv1alpha2.NewResourceError("CreateOrUpdate failed for NnfAccess: %v", client.ObjectKeyFromObject(access)).WithError(err) + return nil, dwsv1alpha3.NewResourceError("CreateOrUpdate failed for NnfAccess: %v", client.ObjectKeyFromObject(access)).WithError(err) } if result == controllerutil.OperationResultCreated { @@ -866,7 +866,7 @@ func (r *NnfWorkflowReconciler) setupNnfAccessForServers(ctx context.Context, st return access, nil } -func (r *NnfWorkflowReconciler) getDirectiveFileSystemType(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int) (string, error) { +func (r *NnfWorkflowReconciler) getDirectiveFileSystemType(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int) (string, error) { dwArgs, _ := dwdparse.BuildArgsMap(workflow.Spec.DWDirectives[index]) switch dwArgs["command"] { case "jobdw": @@ -881,16 +881,16 @@ func (r *NnfWorkflowReconciler) getDirectiveFileSystemType(ctx context.Context, } if err := r.Get(ctx, client.ObjectKeyFromObject(nnfStorage), nnfStorage); err != nil { - return "", dwsv1alpha2.NewResourceError("could not get persistent NnfStorage %v to determine file system type", client.ObjectKeyFromObject(nnfStorage)).WithError(err) + return "", dwsv1alpha3.NewResourceError("could not get persistent NnfStorage %v to determine file system type", client.ObjectKeyFromObject(nnfStorage)).WithError(err) } return nnfStorage.Spec.FileSystemType, nil default: - return "", dwsv1alpha2.NewResourceError("invalid directive '%s' to get file system type", workflow.Spec.DWDirectives[index]).WithFatal() + return "", dwsv1alpha3.NewResourceError("invalid directive '%s' to get file system type", workflow.Spec.DWDirectives[index]).WithFatal() } } -func buildComputeMountPath(workflow *dwsv1alpha2.Workflow, index int) string { +func buildComputeMountPath(workflow *dwsv1alpha3.Workflow, index int) string { prefix := 
os.Getenv("COMPUTE_MOUNT_PREFIX") if len(prefix) == 0 { prefix = "/mnt/nnf" @@ -898,7 +898,7 @@ func buildComputeMountPath(workflow *dwsv1alpha2.Workflow, index int) string { return filepath.Clean(fmt.Sprintf("/%s/%s-%d", prefix, workflow.UID, index)) } -func buildServerMountPath(workflow *dwsv1alpha2.Workflow, index int) string { +func buildServerMountPath(workflow *dwsv1alpha3.Workflow, index int) string { prefix := os.Getenv("SERVER_MOUNT_PREFIX") if len(prefix) == 0 { prefix = "/mnt/nnf" @@ -906,10 +906,10 @@ func buildServerMountPath(workflow *dwsv1alpha2.Workflow, index int) string { return filepath.Clean(fmt.Sprintf("/%s/%s-%d", prefix, workflow.UID, index)) } -func (r *NnfWorkflowReconciler) findPersistentInstance(ctx context.Context, wf *dwsv1alpha2.Workflow, psiName string) (*dwsv1alpha2.PersistentStorageInstance, error) { +func (r *NnfWorkflowReconciler) findPersistentInstance(ctx context.Context, wf *dwsv1alpha3.Workflow, psiName string) (*dwsv1alpha3.PersistentStorageInstance, error) { log := r.Log.WithValues("Workflow", types.NamespacedName{Name: wf.Name, Namespace: wf.Namespace}) - psi := &dwsv1alpha2.PersistentStorageInstance{} + psi := &dwsv1alpha3.PersistentStorageInstance{} psiNamedNamespace := types.NamespacedName{Name: psiName, Namespace: wf.Namespace} err := r.Get(ctx, psiNamedNamespace, psi) if err != nil { @@ -923,12 +923,12 @@ func (r *NnfWorkflowReconciler) findPersistentInstance(ctx context.Context, wf * return psi, nil } -func handleWorkflowError(err error, driverStatus *dwsv1alpha2.WorkflowDriverStatus) { - e, ok := err.(*dwsv1alpha2.ResourceErrorInfo) +func handleWorkflowError(err error, driverStatus *dwsv1alpha3.WorkflowDriverStatus) { + e, ok := err.(*dwsv1alpha3.ResourceErrorInfo) if ok { status, err := e.Severity.ToStatus() if err != nil { - driverStatus.Status = dwsv1alpha2.StatusError + driverStatus.Status = dwsv1alpha3.StatusError driverStatus.Message = "Internal error: " + err.Error() driverStatus.Error = err.Error() } else { 
@@ -937,16 +937,16 @@ func handleWorkflowError(err error, driverStatus *dwsv1alpha2.WorkflowDriverStat driverStatus.Error = e.Error() } } else { - driverStatus.Status = dwsv1alpha2.StatusError + driverStatus.Status = dwsv1alpha3.StatusError driverStatus.Message = "Internal error: " + err.Error() driverStatus.Error = err.Error() } } -func handleWorkflowErrorByIndex(err error, workflow *dwsv1alpha2.Workflow, index int) { +func handleWorkflowErrorByIndex(err error, workflow *dwsv1alpha3.Workflow, index int) { // Create a list of the driverStatus array elements that correspond to the current state // of the workflow and are targeted for the Rabbit driver - driverList := []*dwsv1alpha2.WorkflowDriverStatus{} + driverList := []*dwsv1alpha3.WorkflowDriverStatus{} driverID := os.Getenv("DWS_DRIVER_ID") for i := range workflow.Status.Drivers { @@ -979,7 +979,7 @@ func handleWorkflowErrorByIndex(err error, workflow *dwsv1alpha2.Workflow, index } // Returns the directive index with the 'name' argument matching name, or -1 if not found -func findDirectiveIndexByName(workflow *dwsv1alpha2.Workflow, name string, command string) int { +func findDirectiveIndexByName(workflow *dwsv1alpha3.Workflow, name string, command string) int { for idx, directive := range workflow.Spec.DWDirectives { parameters, _ := dwdparse.BuildArgsMap(directive) if parameters["name"] == name && parameters["command"] == command { @@ -991,7 +991,7 @@ func findDirectiveIndexByName(workflow *dwsv1alpha2.Workflow, name string, comma // Returns the directive index matching the copy_out directive whose source field references // the provided name argument, or -1 if not found. 
-func findCopyOutDirectiveIndexByName(workflow *dwsv1alpha2.Workflow, name string) int { +func findCopyOutDirectiveIndexByName(workflow *dwsv1alpha3.Workflow, name string) int { for idx, directive := range workflow.Spec.DWDirectives { if strings.HasPrefix(directive, "#DW copy_out") { parameters, _ := dwdparse.BuildArgsMap(directive) // ignore error, directives are validated in proposal @@ -1008,7 +1008,7 @@ func findCopyOutDirectiveIndexByName(workflow *dwsv1alpha2.Workflow, name string // Returns the directive index matching the container directive whose storage field(s) reference // the provided name argument, or -1 if not found. -func findContainerDirectiveIndexByName(workflow *dwsv1alpha2.Workflow, name string) int { +func findContainerDirectiveIndexByName(workflow *dwsv1alpha3.Workflow, name string) int { for idx, directive := range workflow.Spec.DWDirectives { parameters, _ := dwdparse.BuildArgsMap(directive) // ignore error, directives are validated in proposal if parameters["command"] == "container" { @@ -1069,12 +1069,12 @@ func getRabbitRelativePath(fsType string, storageRef *corev1.ObjectReference, ac } // indexedResourceName returns a name for a workflow child resource based on the index of the #DW directive -func indexedResourceName(workflow *dwsv1alpha2.Workflow, dwIndex int) string { +func indexedResourceName(workflow *dwsv1alpha3.Workflow, dwIndex int) string { return fmt.Sprintf("%s-%d", workflow.Name, dwIndex) } // Returns the pair for the #DW directive at the specified index -func getStorageReferenceNameFromWorkflowActual(workflow *dwsv1alpha2.Workflow, dwdIndex int) (string, string) { +func getStorageReferenceNameFromWorkflowActual(workflow *dwsv1alpha3.Workflow, dwdIndex int) (string, string) { directive := workflow.Spec.DWDirectives[dwdIndex] p, _ := dwdparse.BuildArgsMap(directive) // ignore error, directives were validated in proposal @@ -1094,7 +1094,7 @@ func getStorageReferenceNameFromWorkflowActual(workflow *dwsv1alpha2.Workflow, d } // 
Returns the pair for the #DW directive in the given DirectiveBreakdown -func getStorageReferenceNameFromDBD(dbd *dwsv1alpha2.DirectiveBreakdown) (string, string) { +func getStorageReferenceNameFromDBD(dbd *dwsv1alpha3.DirectiveBreakdown) (string, string) { var name string namespace := dbd.Namespace @@ -1166,7 +1166,7 @@ func getTargetDirectiveIndexLabel(object metav1.Object) string { return labels[nnfv1alpha3.TargetDirectiveIndexLabel] } -func (r *NnfWorkflowReconciler) unmountNnfAccessIfNecessary(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int, accessSuffix string) (*result, error) { +func (r *NnfWorkflowReconciler) unmountNnfAccessIfNecessary(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int, accessSuffix string) (*result, error) { if !(accessSuffix == "computes" || accessSuffix == "servers") { panic(fmt.Sprint("unhandled NnfAccess suffix", accessSuffix)) } @@ -1186,13 +1186,13 @@ func (r *NnfWorkflowReconciler) unmountNnfAccessIfNecessary(ctx context.Context, } teardownState, found := access.Labels[nnfv1alpha3.DataMovementTeardownStateLabel] - if !found || dwsv1alpha2.WorkflowState(teardownState) == workflow.Status.State { + if !found || dwsv1alpha3.WorkflowState(teardownState) == workflow.Status.State { if access.Spec.DesiredState != "unmounted" { access.Spec.DesiredState = "unmounted" if err := r.Update(ctx, access); err != nil { if !apierrors.IsConflict(err) { - return nil, dwsv1alpha2.NewResourceError("could not update NnfAccess: %v", client.ObjectKeyFromObject(access)).WithError(err) + return nil, dwsv1alpha3.NewResourceError("could not update NnfAccess: %v", client.ObjectKeyFromObject(access)).WithError(err) } return Requeue("conflict").withObject(access), nil @@ -1208,14 +1208,14 @@ func (r *NnfWorkflowReconciler) unmountNnfAccessIfNecessary(ctx context.Context, } // Wait on the NnfAccesses for this workflow-index to reach the provided state. 
-func (r *NnfWorkflowReconciler) waitForNnfAccessStateAndReady(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int, state string) (*result, error) { +func (r *NnfWorkflowReconciler) waitForNnfAccessStateAndReady(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int, state string) (*result, error) { accessSuffixes := []string{"-computes"} // Check if we should also wait on the NnfAccess for the servers fsType, err := r.getDirectiveFileSystemType(ctx, workflow, index) if err != nil { - return nil, dwsv1alpha2.NewResourceError("unable to determine directive file system type").WithError(err).WithFatal() + return nil, dwsv1alpha3.NewResourceError("unable to determine directive file system type").WithError(err).WithFatal() } if fsType == "gfs2" || fsType == "lustre" { @@ -1232,7 +1232,7 @@ func (r *NnfWorkflowReconciler) waitForNnfAccessStateAndReady(ctx context.Contex } if err := r.Get(ctx, client.ObjectKeyFromObject(access), access); err != nil { - return nil, dwsv1alpha2.NewResourceError("could not get NnfAccess: %v", client.ObjectKeyFromObject(access)).WithError(err) + return nil, dwsv1alpha3.NewResourceError("could not get NnfAccess: %v", client.ObjectKeyFromObject(access)).WithError(err) } if access.Status.Error != nil { @@ -1250,7 +1250,7 @@ func (r *NnfWorkflowReconciler) waitForNnfAccessStateAndReady(ctx context.Contex // When unmounting, we are conditionally dependent on the workflow state matching the // state of the teardown label, if found. 
teardownState, found := access.Labels[nnfv1alpha3.DataMovementTeardownStateLabel] - if !found || dwsv1alpha2.WorkflowState(teardownState) == workflow.Status.State { + if !found || dwsv1alpha3.WorkflowState(teardownState) == workflow.Status.State { if access.Status.State != "unmounted" || !access.Status.Ready { return Requeue("pending unmount").withObject(access), nil } @@ -1262,23 +1262,23 @@ func (r *NnfWorkflowReconciler) waitForNnfAccessStateAndReady(ctx context.Contex return nil, nil } -func (r *NnfWorkflowReconciler) addPersistentStorageReference(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int) error { +func (r *NnfWorkflowReconciler) addPersistentStorageReference(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int) error { dwArgs, _ := dwdparse.BuildArgsMap(workflow.Spec.DWDirectives[index]) persistentStorage, err := r.findPersistentInstance(ctx, workflow, dwArgs["name"]) if err != nil { - return dwsv1alpha2.NewResourceError("").WithUserMessage("PersistentStorage '%v' not found", dwArgs["name"]).WithMajor().WithUser() + return dwsv1alpha3.NewResourceError("").WithUserMessage("PersistentStorage '%v' not found", dwArgs["name"]).WithMajor().WithUser() } - if persistentStorage.Status.State != dwsv1alpha2.PSIStateActive { - return dwsv1alpha2.NewResourceError("").WithUserMessage("PersistentStorage is not active").WithFatal().WithUser() + if persistentStorage.Status.State != dwsv1alpha3.PSIStateActive { + return dwsv1alpha3.NewResourceError("").WithUserMessage("PersistentStorage is not active").WithFatal().WithUser() } // Add a consumer reference to the persistent storage for this directive reference := corev1.ObjectReference{ Name: indexedResourceName(workflow, index), Namespace: workflow.Namespace, - Kind: reflect.TypeOf(dwsv1alpha2.Workflow{}).Name(), + Kind: reflect.TypeOf(dwsv1alpha3.Workflow{}).Name(), } found := false @@ -1297,7 +1297,7 @@ func (r *NnfWorkflowReconciler) addPersistentStorageReference(ctx context.Contex return nil } 
-func (r *NnfWorkflowReconciler) removePersistentStorageReference(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int) error { +func (r *NnfWorkflowReconciler) removePersistentStorageReference(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int) error { dwArgs, _ := dwdparse.BuildArgsMap(workflow.Spec.DWDirectives[index]) persistentStorage, err := r.findPersistentInstance(ctx, workflow, dwArgs["name"]) @@ -1309,7 +1309,7 @@ func (r *NnfWorkflowReconciler) removePersistentStorageReference(ctx context.Con reference := corev1.ObjectReference{ Name: indexedResourceName(workflow, index), Namespace: workflow.Namespace, - Kind: reflect.TypeOf(dwsv1alpha2.Workflow{}).Name(), + Kind: reflect.TypeOf(dwsv1alpha3.Workflow{}).Name(), } for i, existingReference := range persistentStorage.Spec.ConsumerReferences { @@ -1322,7 +1322,7 @@ func (r *NnfWorkflowReconciler) removePersistentStorageReference(ctx context.Con return nil } -func (r *NnfWorkflowReconciler) removeAllPersistentStorageReferences(ctx context.Context, workflow *dwsv1alpha2.Workflow) error { +func (r *NnfWorkflowReconciler) removeAllPersistentStorageReferences(ctx context.Context, workflow *dwsv1alpha3.Workflow) error { for i, directive := range workflow.Spec.DWDirectives { dwArgs, _ := dwdparse.BuildArgsMap(directive) if dwArgs["command"] == "persistentdw" { @@ -1336,7 +1336,7 @@ func (r *NnfWorkflowReconciler) removeAllPersistentStorageReferences(ctx context return nil } -func (r *NnfWorkflowReconciler) userContainerHandler(ctx context.Context, workflow *dwsv1alpha2.Workflow, dwArgs map[string]string, index int, log logr.Logger) (*result, error) { +func (r *NnfWorkflowReconciler) userContainerHandler(ctx context.Context, workflow *dwsv1alpha3.Workflow, dwArgs map[string]string, index int, log logr.Logger) (*result, error) { profile, err := getContainerProfile(ctx, r.Client, workflow, index) if err != nil { return nil, err @@ -1346,13 +1346,13 @@ func (r *NnfWorkflowReconciler) 
userContainerHandler(ctx context.Context, workfl // Get the targeted NNF nodes for the container jobs nnfNodes, err := r.getNnfNodesFromComputes(ctx, workflow) if err != nil || len(nnfNodes) <= 0 { - return nil, dwsv1alpha2.NewResourceError("error obtaining the target NNF nodes for containers").WithError(err) + return nil, dwsv1alpha3.NewResourceError("error obtaining the target NNF nodes for containers").WithError(err) } // Get the NNF volumes to mount into the containers volumes, result, err := r.getContainerVolumes(ctx, workflow, dwArgs, profile) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not determine the list of volumes needed to create container job for workflow: %s", workflow.Name).WithError(err) + return nil, dwsv1alpha3.NewResourceError("could not determine the list of volumes needed to create container job for workflow: %s", workflow.Name).WithError(err) } if result != nil { // a requeue can be returned, so make sure that happens return result, nil @@ -1375,23 +1375,23 @@ func (r *NnfWorkflowReconciler) userContainerHandler(ctx context.Context, workfl if mpiJob { if err := c.createMPIJob(); err != nil { - return nil, dwsv1alpha2.NewResourceError("unable to create/update MPIJob").WithMajor().WithError(err) + return nil, dwsv1alpha3.NewResourceError("unable to create/update MPIJob").WithMajor().WithError(err) } } else { // For non-MPI jobs, we need to create a service ourselves if err := r.createContainerService(ctx, workflow); err != nil { - return nil, dwsv1alpha2.NewResourceError("unable to create/update Container Service").WithMajor().WithError(err) + return nil, dwsv1alpha3.NewResourceError("unable to create/update Container Service").WithMajor().WithError(err) } if err := c.createNonMPIJob(); err != nil { - return nil, dwsv1alpha2.NewResourceError("unable to create/update Container Jobs").WithMajor().WithError(err) + return nil, dwsv1alpha3.NewResourceError("unable to create/update Container Jobs").WithMajor().WithError(err) } } 
return nil, nil } -func (r *NnfWorkflowReconciler) createContainerService(ctx context.Context, workflow *dwsv1alpha2.Workflow) error { +func (r *NnfWorkflowReconciler) createContainerService(ctx context.Context, workflow *dwsv1alpha3.Workflow) error { service := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: workflow.Name, @@ -1420,21 +1420,21 @@ func (r *NnfWorkflowReconciler) createContainerService(ctx context.Context, work } // Retrieve the computes for the workflow and find their local nnf nodes -func (r *NnfWorkflowReconciler) getNnfNodesFromComputes(ctx context.Context, workflow *dwsv1alpha2.Workflow) ([]string, error) { +func (r *NnfWorkflowReconciler) getNnfNodesFromComputes(ctx context.Context, workflow *dwsv1alpha3.Workflow) ([]string, error) { ret := []string{} nnfNodes := make(map[string]struct{}) // use a empty struct map to store unique values var computeNodes []string // Get the compute resources - computes := dwsv1alpha2.Computes{ + computes := dwsv1alpha3.Computes{ ObjectMeta: metav1.ObjectMeta{ Name: workflow.Name, Namespace: workflow.Namespace, }, } if err := r.Get(ctx, client.ObjectKeyFromObject(&computes), &computes); err != nil { - return ret, dwsv1alpha2.NewResourceError("could not find Computes resource for workflow") + return ret, dwsv1alpha3.NewResourceError("could not find Computes resource for workflow") } // Build the list of computes @@ -1442,12 +1442,12 @@ func (r *NnfWorkflowReconciler) getNnfNodesFromComputes(ctx context.Context, wor computeNodes = append(computeNodes, c.Name) } if len(computeNodes) == 0 { - return computeNodes, dwsv1alpha2.NewResourceError("the Computes resources does not specify any compute nodes").WithWLM().WithFatal() + return computeNodes, dwsv1alpha3.NewResourceError("the Computes resources does not specify any compute nodes").WithWLM().WithFatal() } - systemConfig := &dwsv1alpha2.SystemConfiguration{} + systemConfig := &dwsv1alpha3.SystemConfiguration{} if err := r.Get(ctx, types.NamespacedName{Name: 
"default", Namespace: corev1.NamespaceDefault}, systemConfig); err != nil { - return ret, dwsv1alpha2.NewResourceError("could not get system configuration").WithFatal() + return ret, dwsv1alpha3.NewResourceError("could not get system configuration").WithFatal() } // The SystemConfiguration is organized by rabbit. Make a map of computes:rabbit for easy lookup. @@ -1465,7 +1465,7 @@ func (r *NnfWorkflowReconciler) getNnfNodesFromComputes(ctx context.Context, wor for _, c := range computeNodes { nnfNode, found := computeMap[c] if !found { - return ret, dwsv1alpha2.NewResourceError("supplied compute node '%s' not found in SystemConfiguration", c).WithFatal() + return ret, dwsv1alpha3.NewResourceError("supplied compute node '%s' not found in SystemConfiguration", c).WithFatal() } // Add the node to the map @@ -1482,7 +1482,7 @@ func (r *NnfWorkflowReconciler) getNnfNodesFromComputes(ctx context.Context, wor return ret, nil } -func (r *NnfWorkflowReconciler) waitForContainersToStart(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int) (*result, error) { +func (r *NnfWorkflowReconciler) waitForContainersToStart(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int) (*result, error) { // Get profile to determine container job type (MPI or not) profile, err := getContainerProfile(ctx, r.Client, workflow, index) if err != nil { @@ -1515,7 +1515,7 @@ func (r *NnfWorkflowReconciler) waitForContainersToStart(ctx context.Context, wo if result != nil { // If timeout, don't allow requeue and return an error if timeoutElapsed { - return nil, dwsv1alpha2.NewResourceError("could not retrieve MPIJobs to set timeout"). + return nil, dwsv1alpha3.NewResourceError("could not retrieve MPIJobs to set timeout"). 
WithUserMessage(timeoutMessage).WithFatal() } return result, nil @@ -1536,10 +1536,10 @@ func (r *NnfWorkflowReconciler) waitForContainersToStart(ctx context.Context, wo if timeoutElapsed { r.Log.Info("container prerun timeout occurred, attempting to set MPIJob activeDeadlineSeconds") if err := r.setMPIJobTimeout(ctx, workflow, mpiJob, time.Duration(1*time.Millisecond)); err != nil { - return nil, dwsv1alpha2.NewResourceError("could not set timeout on MPIJobs"). + return nil, dwsv1alpha3.NewResourceError("could not set timeout on MPIJobs"). WithUserMessage(timeoutMessage).WithError(err).WithFatal() } else { - return nil, dwsv1alpha2.NewResourceError("MPIJob timeout set").WithUserMessage(timeoutMessage).WithFatal() + return nil, dwsv1alpha3.NewResourceError("MPIJob timeout set").WithUserMessage(timeoutMessage).WithFatal() } } return Requeue(fmt.Sprintf("pending MPIJob start for workflow '%s', index: %d", workflow.Name, index)).after(2 * time.Second), nil @@ -1548,7 +1548,7 @@ func (r *NnfWorkflowReconciler) waitForContainersToStart(ctx context.Context, wo jobList, err := r.getContainerJobs(ctx, workflow, index) if err != nil { if timeoutElapsed { - return nil, dwsv1alpha2.NewResourceError("could not retrieve Jobs to set timeout"). + return nil, dwsv1alpha3.NewResourceError("could not retrieve Jobs to set timeout"). WithUserMessage(timeoutMessage).WithFatal().WithError(err) } return nil, err @@ -1558,7 +1558,7 @@ func (r *NnfWorkflowReconciler) waitForContainersToStart(ctx context.Context, wo if len(jobList.Items) < 1 { // If timeout, don't allow a requeue and return an error if timeoutElapsed { - return nil, dwsv1alpha2.NewResourceError("no Jobs found in JobList to set timeout"). + return nil, dwsv1alpha3.NewResourceError("no Jobs found in JobList to set timeout"). 
WithUserMessage(timeoutMessage).WithFatal() } return Requeue(fmt.Sprintf("pending job creation for workflow '%s', index: %d", workflow.Name, index)).after(2 * time.Second), nil @@ -1570,7 +1570,7 @@ func (r *NnfWorkflowReconciler) waitForContainersToStart(ctx context.Context, wo if timeoutElapsed { r.Log.Info("container prerun timeout occurred, attempting to set Job activeDeadlineSeconds") if err := r.setJobTimeout(ctx, job, time.Duration(1*time.Millisecond)); err != nil { - return nil, dwsv1alpha2.NewResourceError("could not set timeout on MPIJobs"). + return nil, dwsv1alpha3.NewResourceError("could not set timeout on MPIJobs"). WithUserMessage(timeoutMessage).WithError(err).WithFatal() } else { continue @@ -1590,14 +1590,14 @@ func (r *NnfWorkflowReconciler) waitForContainersToStart(ctx context.Context, wo // Report the timeout error if timeoutElapsed { - return nil, dwsv1alpha2.NewResourceError("job(s) timeout set").WithUserMessage(timeoutMessage).WithFatal() + return nil, dwsv1alpha3.NewResourceError("job(s) timeout set").WithUserMessage(timeoutMessage).WithFatal() } } return nil, nil } -func (r *NnfWorkflowReconciler) deleteContainers(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int) (*result, error) { +func (r *NnfWorkflowReconciler) deleteContainers(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int) (*result, error) { doneMpi := false doneNonMpi := false @@ -1609,7 +1609,7 @@ func (r *NnfWorkflowReconciler) deleteContainers(ctx context.Context, workflow * }, } // Add workflow matchLabels + directive index (if desired) - matchLabels := dwsv1alpha2.MatchingWorkflow(workflow) + matchLabels := dwsv1alpha3.MatchingWorkflow(workflow) if index >= 0 { matchLabels[nnfv1alpha3.DirectiveIndexLabel] = strconv.Itoa(index) } @@ -1620,12 +1620,12 @@ func (r *NnfWorkflowReconciler) deleteContainers(ctx context.Context, workflow * if strings.Contains(err.Error(), "no kind is registered for the type") || apierrors.IsNotFound(err) { doneMpi = true } 
else { - return nil, dwsv1alpha2.NewResourceError("could not delete container MPIJob(s)").WithError(err).WithMajor().WithInternal() + return nil, dwsv1alpha3.NewResourceError("could not delete container MPIJob(s)").WithError(err).WithMajor().WithInternal() } } else if len(mpiJobList.Items) > 0 { if err := r.DeleteAllOf(ctx, &mpiJobList.Items[0], client.InNamespace(workflow.Namespace), matchLabels, deleteAllOptions); err != nil { if !apierrors.IsNotFound(err) { - return nil, dwsv1alpha2.NewResourceError("could not delete container MPIJob(s)").WithError(err).WithMajor().WithInternal() + return nil, dwsv1alpha3.NewResourceError("could not delete container MPIJob(s)").WithError(err).WithMajor().WithInternal() } } } else { @@ -1638,12 +1638,12 @@ func (r *NnfWorkflowReconciler) deleteContainers(ctx context.Context, workflow * if apierrors.IsNotFound(err) { doneNonMpi = true } else { - return nil, dwsv1alpha2.NewResourceError("could not delete container Job(s)").WithError(err).WithMajor().WithInternal() + return nil, dwsv1alpha3.NewResourceError("could not delete container Job(s)").WithError(err).WithMajor().WithInternal() } } else if len(jobList.Items) > 0 { if err := r.DeleteAllOf(ctx, &jobList.Items[0], client.InNamespace(workflow.Namespace), matchLabels, deleteAllOptions); err != nil { if !apierrors.IsNotFound(err) { - return nil, dwsv1alpha2.NewResourceError("could not delete container Job(s)").WithError(err).WithMajor().WithInternal() + return nil, dwsv1alpha3.NewResourceError("could not delete container Job(s)").WithError(err).WithMajor().WithInternal() } } } else { @@ -1657,7 +1657,7 @@ func (r *NnfWorkflowReconciler) deleteContainers(ctx context.Context, workflow * return Requeue("pending container deletion"), nil } -func (r *NnfWorkflowReconciler) getMPIJobConditions(ctx context.Context, workflow *dwsv1alpha2.Workflow, index, expected int) (*mpiv2beta1.MPIJob, *result) { +func (r *NnfWorkflowReconciler) getMPIJobConditions(ctx context.Context, workflow 
*dwsv1alpha3.Workflow, index, expected int) (*mpiv2beta1.MPIJob, *result) { mpiJob := &mpiv2beta1.MPIJob{ ObjectMeta: metav1.ObjectMeta{ Name: workflow.Name, @@ -1698,24 +1698,24 @@ func (r *NnfWorkflowReconciler) setJobTimeout(ctx context.Context, job batchv1.J }) if err != nil { - return dwsv1alpha2.NewResourceError("error updating job '%s' activeDeadlineSeconds:", job.Name) + return dwsv1alpha3.NewResourceError("error updating job '%s' activeDeadlineSeconds:", job.Name) } } return nil } -func (r *NnfWorkflowReconciler) setMPIJobTimeout(ctx context.Context, workflow *dwsv1alpha2.Workflow, mpiJob *mpiv2beta1.MPIJob, timeout time.Duration) error { +func (r *NnfWorkflowReconciler) setMPIJobTimeout(ctx context.Context, workflow *dwsv1alpha3.Workflow, mpiJob *mpiv2beta1.MPIJob, timeout time.Duration) error { // Set the ActiveDeadLineSeconds on each of the k8s jobs created by MPIJob/mpi-operator. We // need to retrieve the jobs in a different way than non-MPI jobs since the jobs are created // by the MPIJob. 
jobList, err := r.getMPIJobChildrenJobs(ctx, workflow, mpiJob) if err != nil { - return dwsv1alpha2.NewResourceError("setMPIJobTimeout: no MPIJob JobList found for workflow '%s'", workflow.Name).WithMajor() + return dwsv1alpha3.NewResourceError("setMPIJobTimeout: no MPIJob JobList found for workflow '%s'", workflow.Name).WithMajor() } if len(jobList.Items) < 1 { - return dwsv1alpha2.NewResourceError("setMPIJobTimeout: no MPIJob jobs found for workflow '%s'", workflow.Name).WithMajor() + return dwsv1alpha3.NewResourceError("setMPIJobTimeout: no MPIJob jobs found for workflow '%s'", workflow.Name).WithMajor() } for _, job := range jobList.Items { @@ -1727,7 +1727,7 @@ func (r *NnfWorkflowReconciler) setMPIJobTimeout(ctx context.Context, workflow * return nil } -func (r *NnfWorkflowReconciler) waitForContainersToFinish(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int) (*result, error) { +func (r *NnfWorkflowReconciler) waitForContainersToFinish(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int) (*result, error) { // Get profile to determine container job type (MPI or not) profile, err := getContainerProfile(ctx, r.Client, workflow, index) if err != nil { @@ -1770,7 +1770,7 @@ func (r *NnfWorkflowReconciler) waitForContainersToFinish(ctx context.Context, w } if len(jobList.Items) < 1 { - return nil, dwsv1alpha2.NewResourceError("waitForContainersToFinish: no container jobs found for workflow '%s', index: %d", workflow.Name, index).WithMajor() + return nil, dwsv1alpha3.NewResourceError("waitForContainersToFinish: no container jobs found for workflow '%s', index: %d", workflow.Name, index).WithMajor() } // Ensure all the jobs are done running before we check the conditions. 
@@ -1788,7 +1788,7 @@ func (r *NnfWorkflowReconciler) waitForContainersToFinish(ctx context.Context, w return nil, nil } -func (r *NnfWorkflowReconciler) checkContainersResults(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int) (*result, error) { +func (r *NnfWorkflowReconciler) checkContainersResults(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int) (*result, error) { // Get profile to determine container job type (MPI or not) profile, err := getContainerProfile(ctx, r.Client, workflow, index) if err != nil { @@ -1811,10 +1811,10 @@ func (r *NnfWorkflowReconciler) checkContainersResults(ctx context.Context, work for _, c := range mpiJob.Status.Conditions { if c.Type == mpiv2beta1.JobFailed { if c.Reason == "DeadlineExceeded" { - return nil, dwsv1alpha2.NewResourceError("container MPIJob %s (%s): %s", c.Type, c.Reason, c.Message).WithFatal(). + return nil, dwsv1alpha3.NewResourceError("container MPIJob %s (%s): %s", c.Type, c.Reason, c.Message).WithFatal(). WithUserMessage(timeoutMessage) } - return nil, dwsv1alpha2.NewResourceError("container MPIJob %s (%s): %s", c.Type, c.Reason, c.Message).WithFatal(). + return nil, dwsv1alpha3.NewResourceError("container MPIJob %s (%s): %s", c.Type, c.Reason, c.Message).WithFatal(). 
WithUserMessage("user container(s) failed to run successfully after %d attempts", profile.Data.RetryLimit+1) } } @@ -1825,16 +1825,16 @@ func (r *NnfWorkflowReconciler) checkContainersResults(ctx context.Context, work } if len(jobList.Items) < 1 { - return nil, dwsv1alpha2.NewResourceError("checkContainersResults: no container jobs found for workflow '%s', index: %d", workflow.Name, index).WithMajor() + return nil, dwsv1alpha3.NewResourceError("checkContainersResults: no container jobs found for workflow '%s', index: %d", workflow.Name, index).WithMajor() } for _, job := range jobList.Items { for _, condition := range job.Status.Conditions { if condition.Type != batchv1.JobComplete { if condition.Reason == "DeadlineExceeded" { - return nil, dwsv1alpha2.NewResourceError("container job %s (%s): %s", condition.Type, condition.Reason, condition.Message).WithFatal().WithUserMessage(timeoutMessage) + return nil, dwsv1alpha3.NewResourceError("container job %s (%s): %s", condition.Type, condition.Reason, condition.Message).WithFatal().WithUserMessage(timeoutMessage) } - return nil, dwsv1alpha2.NewResourceError("container job %s (%s): %s", condition.Type, condition.Reason, condition.Message).WithFatal() + return nil, dwsv1alpha3.NewResourceError("container job %s (%s): %s", condition.Type, condition.Reason, condition.Message).WithFatal() } } } @@ -1844,7 +1844,7 @@ func (r *NnfWorkflowReconciler) checkContainersResults(ctx context.Context, work } // Given an MPIJob, return a list of all the k8s Jobs owned by the MPIJob -func (r *NnfWorkflowReconciler) getMPIJobChildrenJobs(ctx context.Context, workflow *dwsv1alpha2.Workflow, mpiJob *mpiv2beta1.MPIJob) (*batchv1.JobList, error) { +func (r *NnfWorkflowReconciler) getMPIJobChildrenJobs(ctx context.Context, workflow *dwsv1alpha3.Workflow, mpiJob *mpiv2beta1.MPIJob) (*batchv1.JobList, error) { // The k8s jobs that are spawned off by MPIJob do not have labels tied to the workflow. 
// Therefore, we need to get the k8s jobs manually. To do this, we can query the jobs by the // name of the MPIJob. However, this doesn't account for the namespace. We need another way. @@ -1854,7 +1854,7 @@ func (r *NnfWorkflowReconciler) getMPIJobChildrenJobs(ctx context.Context, workf jobList := &batchv1.JobList{} if err := r.List(ctx, jobList, matchLabels); err != nil { - return nil, dwsv1alpha2.NewResourceError("could not retrieve Jobs for MPIJob %s", mpiJob.Name).WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not retrieve Jobs for MPIJob %s", mpiJob.Name).WithError(err).WithMajor() } // Create a new list so we don't alter the loop iterator @@ -1875,38 +1875,38 @@ func (r *NnfWorkflowReconciler) getMPIJobChildrenJobs(ctx context.Context, workf return jobList, nil } -func (r *NnfWorkflowReconciler) getMPIJobs(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int) (*mpiv2beta1.MPIJobList, error) { +func (r *NnfWorkflowReconciler) getMPIJobs(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int) (*mpiv2beta1.MPIJobList, error) { // Get the MPIJobs for this workflow and directive index - matchLabels := dwsv1alpha2.MatchingWorkflow(workflow) + matchLabels := dwsv1alpha3.MatchingWorkflow(workflow) if index >= 0 { matchLabels[nnfv1alpha3.DirectiveIndexLabel] = strconv.Itoa(index) } jobList := &mpiv2beta1.MPIJobList{} if err := r.List(ctx, jobList, matchLabels); err != nil { - return nil, dwsv1alpha2.NewResourceError("could not retrieve MPIJobs").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not retrieve MPIJobs").WithError(err).WithMajor() } return jobList, nil } -func (r *NnfWorkflowReconciler) getContainerJobs(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int) (*batchv1.JobList, error) { +func (r *NnfWorkflowReconciler) getContainerJobs(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int) (*batchv1.JobList, error) { // Get the jobs for this workflow and directive 
index - matchLabels := dwsv1alpha2.MatchingWorkflow(workflow) + matchLabels := dwsv1alpha3.MatchingWorkflow(workflow) if index >= 0 { matchLabels[nnfv1alpha3.DirectiveIndexLabel] = strconv.Itoa(index) } jobList := &batchv1.JobList{} if err := r.List(ctx, jobList, matchLabels); err != nil { - return nil, dwsv1alpha2.NewResourceError("could not retrieve Jobs").WithError(err).WithMajor() + return nil, dwsv1alpha3.NewResourceError("could not retrieve Jobs").WithError(err).WithMajor() } return jobList, nil } // Create a list of volumes to be mounted inside of the containers based on the DW_JOB/DW_PERSISTENT arguments -func (r *NnfWorkflowReconciler) getContainerVolumes(ctx context.Context, workflow *dwsv1alpha2.Workflow, dwArgs map[string]string, profile *nnfv1alpha3.NnfContainerProfile) ([]nnfContainerVolume, *result, error) { +func (r *NnfWorkflowReconciler) getContainerVolumes(ctx context.Context, workflow *dwsv1alpha3.Workflow, dwArgs map[string]string, profile *nnfv1alpha3.NnfContainerProfile) ([]nnfContainerVolume, *result, error) { volumes := []nnfContainerVolume{} for arg, val := range dwArgs { @@ -1944,12 +1944,12 @@ func (r *NnfWorkflowReconciler) getContainerVolumes(ctx context.Context, workflo if cmd == "globaldw" { globalLustre := r.findLustreFileSystemForPath(ctx, val, r.Log) if globalLustre == nil { - return nil, nil, dwsv1alpha2.NewResourceError("").WithUserMessage("global Lustre file system containing '%s' not found", val).WithUser().WithFatal() + return nil, nil, dwsv1alpha3.NewResourceError("").WithUserMessage("global Lustre file system containing '%s' not found", val).WithUser().WithFatal() } ns, nsFound := globalLustre.Spec.Namespaces[workflow.Namespace] if !nsFound || len(ns.Modes) < 1 { - return nil, nil, dwsv1alpha2.NewResourceError("").WithUserMessage("global Lustre file system containing '%s' is not configured for the '%s' namespace", val, workflow.Namespace).WithUser().WithFatal() + return nil, nil, 
dwsv1alpha3.NewResourceError("").WithUserMessage("global Lustre file system containing '%s' is not configured for the '%s' namespace", val, workflow.Namespace).WithUser().WithFatal() } // Retrieve the desired PVC mode from the container profile. Default to readwritemany. @@ -1969,7 +1969,7 @@ func (r *NnfWorkflowReconciler) getContainerVolumes(ctx context.Context, workflo // Find the directive index for the given name so we can retrieve its NnfAccess vol.directiveIndex = findDirectiveIndexByName(workflow, vol.directiveName, vol.command) if vol.directiveIndex < 0 { - return nil, nil, dwsv1alpha2.NewResourceError("could not retrieve the directive breakdown for '%s'", vol.directiveName).WithMajor() + return nil, nil, dwsv1alpha3.NewResourceError("could not retrieve the directive breakdown for '%s'", vol.directiveName).WithMajor() } nnfAccess := &nnfv1alpha3.NnfAccess{ @@ -1979,7 +1979,7 @@ func (r *NnfWorkflowReconciler) getContainerVolumes(ctx context.Context, workflo }, } if err := r.Get(ctx, client.ObjectKeyFromObject(nnfAccess), nnfAccess); err != nil { - return nil, nil, dwsv1alpha2.NewResourceError("could not retrieve the NnfAccess '%s'", nnfAccess.Name).WithMajor() + return nil, nil, dwsv1alpha3.NewResourceError("could not retrieve the NnfAccess '%s'", nnfAccess.Name).WithMajor() } if !nnfAccess.Status.Ready { @@ -1995,7 +1995,7 @@ func (r *NnfWorkflowReconciler) getContainerVolumes(ctx context.Context, workflo } // Use the container profile to determine how many ports are needed and request them from the default NnfPortManager -func (r *NnfWorkflowReconciler) getContainerPorts(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int) (*result, error) { +func (r *NnfWorkflowReconciler) getContainerPorts(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int) (*result, error) { profile, err := getContainerProfile(ctx, r.Client, workflow, index) if err != nil { return nil, err @@ -2021,7 +2021,7 @@ func (r *NnfWorkflowReconciler) 
getContainerPorts(ctx context.Context, workflow Requester: corev1.ObjectReference{ Name: workflow.Name, Namespace: workflow.Namespace, - Kind: reflect.TypeOf(dwsv1alpha2.Workflow{}).Name(), + Kind: reflect.TypeOf(dwsv1alpha3.Workflow{}).Name(), UID: workflow.UID, }, Count: int(profile.Data.NumPorts), @@ -2041,7 +2041,7 @@ func (r *NnfWorkflowReconciler) getContainerPorts(ctx context.Context, workflow } // Ensure that the default NnfPortManager has assigned the appropriate number of requested ports -func (r *NnfWorkflowReconciler) checkContainerPorts(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int) (*result, error) { +func (r *NnfWorkflowReconciler) checkContainerPorts(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int) (*result, error) { profile, err := getContainerProfile(ctx, r.Client, workflow, index) if err != nil { @@ -2064,9 +2064,9 @@ func (r *NnfWorkflowReconciler) checkContainerPorts(ctx context.Context, workflo workflow.Status.Env[name] = val return nil, nil // done } else if alloc.Status == nnfv1alpha3.NnfPortManagerAllocationStatusInvalidConfiguration { - return nil, dwsv1alpha2.NewResourceError("").WithUserMessage("could not request ports for container workflow: Invalid NnfPortManager configuration").WithFatal().WithUser() + return nil, dwsv1alpha3.NewResourceError("").WithUserMessage("could not request ports for container workflow: Invalid NnfPortManager configuration").WithFatal().WithUser() } else if alloc.Status == nnfv1alpha3.NnfPortManagerAllocationStatusInsufficientResources { - return nil, dwsv1alpha2.NewResourceError("").WithUserMessage("could not request ports for container workflow: InsufficientResources").WithFatal() + return nil, dwsv1alpha3.NewResourceError("").WithUserMessage("could not request ports for container workflow: InsufficientResources").WithFatal() } } } @@ -2097,8 +2097,8 @@ func getContainerPortManager(ctx context.Context, cl client.Client) (*nnfv1alpha } // Tell the NnfPortManager that the ports 
are no longer needed -// func (r *NnfWorkflowReconciler) releaseContainerPorts(ctx context.Context, workflow *dwsv1alpha2.Workflow, index int) (*result, error) { -func (r *NnfWorkflowReconciler) releaseContainerPorts(ctx context.Context, workflow *dwsv1alpha2.Workflow) (*result, error) { +// func (r *NnfWorkflowReconciler) releaseContainerPorts(ctx context.Context, workflow *dwsv1alpha3.Workflow, index int) (*result, error) { +func (r *NnfWorkflowReconciler) releaseContainerPorts(ctx context.Context, workflow *dwsv1alpha3.Workflow) (*result, error) { found := false pm, err := getContainerPortManager(ctx, r.Client) diff --git a/internal/controller/nnf_workflow_controller_test.go b/internal/controller/nnf_workflow_controller_test.go index 5cc53ff94..fef3975ac 100644 --- a/internal/controller/nnf_workflow_controller_test.go +++ b/internal/controller/nnf_workflow_controller_test.go @@ -39,7 +39,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" lusv1beta1 "github.com/NearNodeFlash/lustre-fs-operator/api/v1beta1" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" ) @@ -56,7 +56,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { var ( key types.NamespacedName - workflow *dwsv1alpha2.Workflow + workflow *dwsv1alpha3.Workflow setup sync.Once storageProfile *nnfv1alpha3.NnfStorageProfile dmProfile *nnfv1alpha3.NnfDataMovementProfile @@ -99,13 +99,13 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Namespace: corev1.NamespaceDefault, } - workflow = &dwsv1alpha2.Workflow{ + workflow = &dwsv1alpha3.Workflow{ ObjectMeta: metav1.ObjectMeta{ Name: key.Name, Namespace: key.Namespace, }, - Spec: dwsv1alpha2.WorkflowSpec{ - DesiredState: dwsv1alpha2.StateProposal, + Spec: dwsv1alpha3.WorkflowSpec{ + DesiredState: dwsv1alpha3.StateProposal, JobID: intstr.FromString("job 1244"), WLMID: 
uuid.NewString(), UserID: baseWorkflowUserID, @@ -113,7 +113,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { }, } - expected := &dwsv1alpha2.Workflow{} + expected := &dwsv1alpha3.Workflow{} Expect(k8sClient.Get(context.TODO(), key, expected)).ToNot(Succeed()) // Create a default NnfStorageProfile for the unit tests. @@ -128,19 +128,19 @@ var _ = Describe("NNF Workflow Unit Tests", func() { AfterEach(func() { Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - workflow.Spec.DesiredState = dwsv1alpha2.StateTeardown + workflow.Spec.DesiredState = dwsv1alpha3.StateTeardown return k8sClient.Update(context.TODO(), workflow) }).Should(Succeed(), "teardown") Eventually(func(g Gomega) bool { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - return workflow.Status.Ready && workflow.Status.State == dwsv1alpha2.StateTeardown + return workflow.Status.Ready && workflow.Status.State == dwsv1alpha3.StateTeardown }).Should(BeTrue(), "reach desired teardown state") Expect(k8sClient.Delete(context.TODO(), workflow)).To(Succeed()) Eventually(func() error { // Delete can still return the cached object. 
Wait until the object is no longer present - expected := &dwsv1alpha2.Workflow{} + expected := &dwsv1alpha3.Workflow{} return k8sClient.Get(context.TODO(), key, expected) }).ShouldNot(Succeed()) @@ -157,11 +157,11 @@ var _ = Describe("NNF Workflow Unit Tests", func() { }).ShouldNot(Succeed()) }) - getErroredDriverStatus := func(workflow *dwsv1alpha2.Workflow) *dwsv1alpha2.WorkflowDriverStatus { + getErroredDriverStatus := func(workflow *dwsv1alpha3.Workflow) *dwsv1alpha3.WorkflowDriverStatus { driverID := os.Getenv("DWS_DRIVER_ID") for _, driver := range workflow.Status.Drivers { if driver.DriverID == driverID { - if driver.Status == dwsv1alpha2.StatusError { + if driver.Status == dwsv1alpha3.StatusError { return &driver } } @@ -173,15 +173,15 @@ var _ = Describe("NNF Workflow Unit Tests", func() { By("Fabricate the persistent storage instance") // Create a persistent storage instance to be found - psi := &dwsv1alpha2.PersistentStorageInstance{ + psi := &dwsv1alpha3.PersistentStorageInstance{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: workflow.Namespace}, - Spec: dwsv1alpha2.PersistentStorageInstanceSpec{ + Spec: dwsv1alpha3.PersistentStorageInstanceSpec{ Name: name, FsType: fsType, // DWDirective: workflow.Spec.DWDirectives[0], DWDirective: "#DW create_persistent capacity=1GB name=" + name, - State: dwsv1alpha2.PSIStateActive, + State: dwsv1alpha3.PSIStateActive, }, } Expect(k8sClient.Create(context.TODO(), psi)).To(Succeed()) @@ -208,7 +208,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { deletePersistentStorageInstance := func(name string) { By("delete persistent storage instance") - psi := &dwsv1alpha2.PersistentStorageInstance{ + psi := &dwsv1alpha3.PersistentStorageInstance{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: workflow.Namespace}, } Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(psi), psi)).To(Succeed()) @@ -232,8 +232,8 @@ var _ = Describe("NNF Workflow Unit Tests", func() { // 
We need to be able to pass errors through the PersistentStorageInstance and DirectiveBreakdown // before we can test this /* - Eventually(func() *dwsv1alpha2.WorkflowDriverStatus { - expected := &dwsv1alpha2.Workflow{} + Eventually(func() *dwsv1alpha3.WorkflowDriverStatus { + expected := &dwsv1alpha3.Workflow{} k8sClient.Get(context.TODO(), key, expected) return getErroredDriverStatus(expected) }).ShouldNot(BeNil(), "have an error present") @@ -253,8 +253,8 @@ var _ = Describe("NNF Workflow Unit Tests", func() { // We need to be able to pass errors through the PersistentStorageInstance and DirectiveBreakdown // before we can test this /* - Eventually(func() *dwsv1alpha2.WorkflowDriverStatus { - expected := &dwsv1alpha2.Workflow{} + Eventually(func() *dwsv1alpha3.WorkflowDriverStatus { + expected := &dwsv1alpha3.Workflow{} k8sClient.Get(context.TODO(), key, expected) return getErroredDriverStatus(expected) }).ShouldNot(BeNil(), "have an error present") @@ -289,8 +289,8 @@ var _ = Describe("NNF Workflow Unit Tests", func() { // We need to be able to pass errors through the PersistentStorageInstance and DirectiveBreakdown // before we can test this /* - Eventually(func() *dwsv1alpha2.WorkflowDriverStatus { - expected := &dwsv1alpha2.Workflow{} + Eventually(func() *dwsv1alpha3.WorkflowDriverStatus { + expected := &dwsv1alpha3.Workflow{} k8sClient.Get(context.TODO(), key, expected) return getErroredDriverStatus(expected) }).ShouldNot(BeNil(), "have an error present") @@ -336,10 +336,10 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Expect(k8sClient.Create(context.TODO(), workflow)).To(Succeed(), "create workflow") - workflowAfter := &dwsv1alpha2.Workflow{} + workflowAfter := &dwsv1alpha3.Workflow{} Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(context.TODO(), key, workflowAfter)).To(Succeed()) - if (workflowAfter.Status.Ready == true) && (workflowAfter.Status.State == dwsv1alpha2.StateProposal) && (getErroredDriverStatus(workflowAfter) == nil) { + if 
(workflowAfter.Status.Ready == true) && (workflowAfter.Status.State == dwsv1alpha3.StateProposal) && (getErroredDriverStatus(workflowAfter) == nil) { return nil } return fmt.Errorf("ready state not achieved") @@ -403,10 +403,10 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Expect(k8sClient.Create(context.TODO(), workflow)).To(Succeed(), "create workflow") - workflowAfter := &dwsv1alpha2.Workflow{} + workflowAfter := &dwsv1alpha3.Workflow{} Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(context.TODO(), key, workflowAfter)).To(Succeed()) - if (workflowAfter.Status.Ready == false) && (workflowAfter.Status.State == dwsv1alpha2.StateProposal) && (getErroredDriverStatus(workflowAfter) != nil) { + if (workflowAfter.Status.Ready == false) && (workflowAfter.Status.State == dwsv1alpha3.StateProposal) && (getErroredDriverStatus(workflowAfter) != nil) { return nil } return fmt.Errorf("error state not achieved") @@ -424,10 +424,10 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Expect(k8sClient.Create(context.TODO(), workflow)).To(Succeed(), "create workflow") - workflowAfter := &dwsv1alpha2.Workflow{} + workflowAfter := &dwsv1alpha3.Workflow{} Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(context.TODO(), key, workflowAfter)).To(Succeed()) - if (workflowAfter.Status.Ready == false) && (workflowAfter.Status.State == dwsv1alpha2.StateProposal) && (getErroredDriverStatus(workflowAfter) != nil) { + if (workflowAfter.Status.Ready == false) && (workflowAfter.Status.State == dwsv1alpha3.StateProposal) && (getErroredDriverStatus(workflowAfter) != nil) { return nil } return fmt.Errorf("error state not achieved") @@ -460,10 +460,10 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Expect(k8sClient.Create(context.TODO(), workflow)).To(Succeed(), "create workflow") - workflowAfter := &dwsv1alpha2.Workflow{} + workflowAfter := &dwsv1alpha3.Workflow{} Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(context.TODO(), key, 
workflowAfter)).To(Succeed()) - if (workflowAfter.Status.Ready == false) && (workflowAfter.Status.State == dwsv1alpha2.StateProposal) && (getErroredDriverStatus(workflowAfter) != nil) { + if (workflowAfter.Status.Ready == false) && (workflowAfter.Status.State == dwsv1alpha3.StateProposal) && (getErroredDriverStatus(workflowAfter) != nil) { return nil } return fmt.Errorf("error state not achieved") @@ -526,10 +526,10 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Expect(k8sClient.Create(context.TODO(), workflow)).To(Succeed(), "create workflow") - workflowAfter := &dwsv1alpha2.Workflow{} + workflowAfter := &dwsv1alpha3.Workflow{} Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(context.TODO(), key, workflowAfter)).To(Succeed()) - if (workflowAfter.Status.Ready == true) && (workflowAfter.Status.State == dwsv1alpha2.StateProposal) && (getErroredDriverStatus(workflowAfter) == nil) { + if (workflowAfter.Status.Ready == true) && (workflowAfter.Status.State == dwsv1alpha3.StateProposal) && (getErroredDriverStatus(workflowAfter) == nil) { return nil } return fmt.Errorf("ready state not achieved") @@ -575,8 +575,8 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Expect(k8sClient.Create(context.TODO(), workflow)).To(Succeed(), "create workflow") - Eventually(func() *dwsv1alpha2.WorkflowDriverStatus { - expected := &dwsv1alpha2.Workflow{} + Eventually(func() *dwsv1alpha3.WorkflowDriverStatus { + expected := &dwsv1alpha3.Workflow{} k8sClient.Get(context.TODO(), key, expected) return getErroredDriverStatus(expected) }).ShouldNot(BeNil(), "have an error present") @@ -591,8 +591,8 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Expect(k8sClient.Create(context.TODO(), workflow)).To(Succeed(), "create workflow") - Eventually(func() *dwsv1alpha2.WorkflowDriverStatus { - expected := &dwsv1alpha2.Workflow{} + Eventually(func() *dwsv1alpha3.WorkflowDriverStatus { + expected := &dwsv1alpha3.Workflow{} k8sClient.Get(context.TODO(), key, expected) return 
getErroredDriverStatus(expected) }).ShouldNot(BeNil(), "have an error present") @@ -606,8 +606,8 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Expect(k8sClient.Create(context.TODO(), workflow)).To(Succeed(), "create workflow") - Eventually(func() *dwsv1alpha2.WorkflowDriverStatus { - expected := &dwsv1alpha2.Workflow{} + Eventually(func() *dwsv1alpha3.WorkflowDriverStatus { + expected := &dwsv1alpha3.Workflow{} k8sClient.Get(context.TODO(), key, expected) return getErroredDriverStatus(expected) }).ShouldNot(BeNil(), "have an error present") @@ -624,8 +624,8 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Expect(k8sClient.Create(context.TODO(), workflow)).To(Succeed(), "create workflow") - Eventually(func() *dwsv1alpha2.WorkflowDriverStatus { - expected := &dwsv1alpha2.Workflow{} + Eventually(func() *dwsv1alpha3.WorkflowDriverStatus { + expected := &dwsv1alpha3.Workflow{} k8sClient.Get(context.TODO(), key, expected) return getErroredDriverStatus(expected) }).ShouldNot(BeNil(), "have an error present") @@ -640,8 +640,8 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Expect(k8sClient.Create(context.TODO(), workflow)).To(Succeed(), "create workflow") - Eventually(func() *dwsv1alpha2.WorkflowDriverStatus { - expected := &dwsv1alpha2.Workflow{} + Eventually(func() *dwsv1alpha3.WorkflowDriverStatus { + expected := &dwsv1alpha3.Workflow{} k8sClient.Get(context.TODO(), key, expected) return getErroredDriverStatus(expected) }).ShouldNot(BeNil(), "have an error present") @@ -655,8 +655,8 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Expect(k8sClient.Create(context.TODO(), workflow)).To(Succeed(), "create workflow") - Eventually(func() *dwsv1alpha2.WorkflowDriverStatus { - expected := &dwsv1alpha2.Workflow{} + Eventually(func() *dwsv1alpha3.WorkflowDriverStatus { + expected := &dwsv1alpha3.Workflow{} k8sClient.Get(context.TODO(), key, expected) return getErroredDriverStatus(expected) }).ShouldNot(BeNil(), "have an error present") @@ -676,12 
+676,12 @@ var _ = Describe("NNF Workflow Unit Tests", func() { return workflow.Status.Ready }).Should(BeTrue(), "waiting for ready after create") - workflow.Spec.DesiredState = dwsv1alpha2.StateSetup + workflow.Spec.DesiredState = dwsv1alpha3.StateSetup Expect(k8sClient.Update(context.TODO(), workflow)).To(Succeed()) Eventually(func(g Gomega) bool { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - return workflow.Status.Ready && workflow.Status.State == dwsv1alpha2.StateSetup + return workflow.Status.Ready && workflow.Status.State == dwsv1alpha3.StateSetup }).Should(BeTrue(), "transition through setup") }) @@ -761,7 +761,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { By("transition to data in state") Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - workflow.Spec.DesiredState = dwsv1alpha2.StateDataIn + workflow.Spec.DesiredState = dwsv1alpha3.StateDataIn return k8sClient.Update(context.TODO(), workflow) }).Should(Succeed(), "update to DataIn") @@ -842,7 +842,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - workflow.Spec.DesiredState = dwsv1alpha2.StateDataIn + workflow.Spec.DesiredState = dwsv1alpha3.StateDataIn return k8sClient.Update(context.TODO(), workflow) }).Should(Succeed(), "transition desired state to DataIn") @@ -891,9 +891,9 @@ var _ = Describe("NNF Workflow Unit Tests", func() { const nodeName = "rabbit-node" var ( - storage *dwsv1alpha2.Storage - directiveBreakdown *dwsv1alpha2.DirectiveBreakdown - servers *dwsv1alpha2.Servers + storage *dwsv1alpha3.Storage + directiveBreakdown *dwsv1alpha3.DirectiveBreakdown + servers *dwsv1alpha3.Servers ) JustBeforeEach(func() { @@ -906,7 +906,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { return workflow.Status.Ready }).Should(BeTrue(), "waiting for ready after create") - directiveBreakdown = 
&dwsv1alpha2.DirectiveBreakdown{ + directiveBreakdown = &dwsv1alpha3.DirectiveBreakdown{ ObjectMeta: metav1.ObjectMeta{ Name: workflow.Status.DirectiveBreakdowns[0].Name, Namespace: workflow.Status.DirectiveBreakdowns[0].Namespace, @@ -914,7 +914,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { } Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(directiveBreakdown), directiveBreakdown)).To(Succeed()) - servers = &dwsv1alpha2.Servers{ + servers = &dwsv1alpha3.Servers{ ObjectMeta: metav1.ObjectMeta{ Name: directiveBreakdown.Status.Storage.Reference.Name, Namespace: directiveBreakdown.Status.Storage.Reference.Namespace, @@ -923,10 +923,10 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(servers), servers)).To(Succeed()) for _, directiveAllocationSet := range directiveBreakdown.Status.Storage.AllocationSets { - allocationSet := dwsv1alpha2.ServersSpecAllocationSet{ + allocationSet := dwsv1alpha3.ServersSpecAllocationSet{ Label: directiveAllocationSet.Label, AllocationSize: directiveAllocationSet.MinimumCapacity, - Storage: []dwsv1alpha2.ServersSpecStorage{ + Storage: []dwsv1alpha3.ServersSpecStorage{ { Name: nodeName, AllocationCount: 1, @@ -953,7 +953,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { }) BeforeEach(func() { - storage = &dwsv1alpha2.Storage{ + storage = &dwsv1alpha3.Storage{ ObjectMeta: metav1.ObjectMeta{ Name: nodeName, Namespace: corev1.NamespaceDefault, @@ -965,14 +965,14 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storage), storage)).To(Succeed()) - storage.Status = dwsv1alpha2.StorageStatus{ + storage.Status = dwsv1alpha3.StorageStatus{ Capacity: 100000000000, - Access: dwsv1alpha2.StorageAccess{ - Protocol: dwsv1alpha2.PCIe, - Servers: []dwsv1alpha2.Node{ + Access: dwsv1alpha3.StorageAccess{ + Protocol: dwsv1alpha3.PCIe, + Servers: 
[]dwsv1alpha3.Node{ { Name: nodeName, - Status: dwsv1alpha2.ReadyStatus, + Status: dwsv1alpha3.ReadyStatus, }, }, }, @@ -997,13 +997,13 @@ var _ = Describe("NNF Workflow Unit Tests", func() { It("Succeeds with one allocation per allocation set", func() { Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - workflow.Spec.DesiredState = dwsv1alpha2.StateSetup + workflow.Spec.DesiredState = dwsv1alpha3.StateSetup return k8sClient.Update(context.TODO(), workflow) }).Should(Succeed(), "update to Setup") Eventually(func(g Gomega) bool { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - return workflow.Status.Ready && workflow.Status.State == dwsv1alpha2.StateSetup + return workflow.Status.Ready && workflow.Status.State == dwsv1alpha3.StateSetup }).Should(BeTrue(), "waiting for ready after setup") }) @@ -1028,13 +1028,13 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - workflow.Spec.DesiredState = dwsv1alpha2.StateSetup + workflow.Spec.DesiredState = dwsv1alpha3.StateSetup return k8sClient.Update(context.TODO(), workflow) }).Should(Succeed(), "update to Setup") Eventually(func(g Gomega) bool { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - return workflow.Status.Ready && workflow.Status.State == dwsv1alpha2.StateSetup + return workflow.Status.Ready && workflow.Status.State == dwsv1alpha3.StateSetup }).Should(BeTrue(), "waiting for ready after setup") }) @@ -1059,13 +1059,13 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - workflow.Spec.DesiredState = dwsv1alpha2.StateSetup + workflow.Spec.DesiredState = dwsv1alpha3.StateSetup return k8sClient.Update(context.TODO(), workflow) }).Should(Succeed(), "update to Setup") Eventually(func(g Gomega) bool { 
g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - return workflow.Status.Ready && workflow.Status.State == dwsv1alpha2.StateSetup + return workflow.Status.Ready && workflow.Status.State == dwsv1alpha3.StateSetup }).Should(BeTrue(), "waiting for ready after setup") }) @@ -1081,13 +1081,13 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - workflow.Spec.DesiredState = dwsv1alpha2.StateSetup + workflow.Spec.DesiredState = dwsv1alpha3.StateSetup return k8sClient.Update(context.TODO(), workflow) }).Should(Succeed(), "update to Setup") Eventually(func(g Gomega) bool { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - return workflow.Status.State == dwsv1alpha2.StateSetup && workflow.Status.Status == dwsv1alpha2.StatusError + return workflow.Status.State == dwsv1alpha3.StateSetup && workflow.Status.Status == dwsv1alpha3.StatusError }).Should(BeTrue(), "waiting for setup state to fail") }) @@ -1103,13 +1103,13 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - workflow.Spec.DesiredState = dwsv1alpha2.StateSetup + workflow.Spec.DesiredState = dwsv1alpha3.StateSetup return k8sClient.Update(context.TODO(), workflow) }).Should(Succeed(), "update to Setup") Eventually(func(g Gomega) bool { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - return workflow.Status.State == dwsv1alpha2.StateSetup && workflow.Status.Status == dwsv1alpha2.StatusError + return workflow.Status.State == dwsv1alpha3.StateSetup && workflow.Status.Status == dwsv1alpha3.StatusError }).Should(BeTrue(), "waiting for setup state to fail") }) @@ -1125,13 +1125,13 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - 
workflow.Spec.DesiredState = dwsv1alpha2.StateSetup + workflow.Spec.DesiredState = dwsv1alpha3.StateSetup return k8sClient.Update(context.TODO(), workflow) }).Should(Succeed(), "update to Setup") Eventually(func(g Gomega) bool { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - return workflow.Status.State == dwsv1alpha2.StateSetup && workflow.Status.Status == dwsv1alpha2.StatusError + return workflow.Status.State == dwsv1alpha3.StateSetup && workflow.Status.Status == dwsv1alpha3.StatusError }).Should(BeTrue(), "waiting for setup state to fail") }) @@ -1147,13 +1147,13 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - workflow.Spec.DesiredState = dwsv1alpha2.StateSetup + workflow.Spec.DesiredState = dwsv1alpha3.StateSetup return k8sClient.Update(context.TODO(), workflow) }).Should(Succeed(), "update to Setup") Eventually(func(g Gomega) bool { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - return workflow.Status.State == dwsv1alpha2.StateSetup && workflow.Status.Status == dwsv1alpha2.StatusError + return workflow.Status.State == dwsv1alpha3.StateSetup && workflow.Status.Status == dwsv1alpha3.StatusError }).Should(BeTrue(), "waiting for setup state to fail") }) @@ -1178,13 +1178,13 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - workflow.Spec.DesiredState = dwsv1alpha2.StateSetup + workflow.Spec.DesiredState = dwsv1alpha3.StateSetup return k8sClient.Update(context.TODO(), workflow) }).Should(Succeed(), "update to Setup") Eventually(func(g Gomega) bool { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - return workflow.Status.State == dwsv1alpha2.StateSetup && workflow.Status.Status == dwsv1alpha2.StatusError + return workflow.Status.State == dwsv1alpha3.StateSetup && 
workflow.Status.Status == dwsv1alpha3.StatusError }).Should(BeTrue(), "waiting for setup state to fail") }) @@ -1206,13 +1206,13 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - workflow.Spec.DesiredState = dwsv1alpha2.StateSetup + workflow.Spec.DesiredState = dwsv1alpha3.StateSetup return k8sClient.Update(context.TODO(), workflow) }).Should(Succeed(), "update to Setup") Eventually(func(g Gomega) bool { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - return workflow.Status.State == dwsv1alpha2.StateSetup && workflow.Status.Status == dwsv1alpha2.StatusError + return workflow.Status.State == dwsv1alpha3.StateSetup && workflow.Status.Status == dwsv1alpha3.StatusError }).Should(BeTrue(), "waiting for setup state to fail") }) @@ -1231,13 +1231,13 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - workflow.Spec.DesiredState = dwsv1alpha2.StateSetup + workflow.Spec.DesiredState = dwsv1alpha3.StateSetup return k8sClient.Update(context.TODO(), workflow) }).Should(Succeed(), "update to Setup") Eventually(func(g Gomega) bool { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - return workflow.Status.State == dwsv1alpha2.StateSetup && workflow.Status.Status == dwsv1alpha2.StatusError + return workflow.Status.State == dwsv1alpha3.StateSetup && workflow.Status.Status == dwsv1alpha3.StatusError }).Should(BeTrue(), "waiting for setup state to fail") }) @@ -1274,13 +1274,13 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - workflow.Spec.DesiredState = dwsv1alpha2.StateSetup + workflow.Spec.DesiredState = dwsv1alpha3.StateSetup return k8sClient.Update(context.TODO(), workflow) }).Should(Succeed(), "update to 
Setup") Eventually(func(g Gomega) bool { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - return workflow.Status.Ready && workflow.Status.State == dwsv1alpha2.StateSetup + return workflow.Status.Ready && workflow.Status.State == dwsv1alpha3.StateSetup }).Should(BeTrue(), "waiting for ready after setup") nnfStorage := &nnfv1alpha3.NnfStorage{ @@ -1317,13 +1317,13 @@ var _ = Describe("NNF Workflow Unit Tests", func() { It("Succeeds with one allocation per allocation set", func() { Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - workflow.Spec.DesiredState = dwsv1alpha2.StateSetup + workflow.Spec.DesiredState = dwsv1alpha3.StateSetup return k8sClient.Update(context.TODO(), workflow) }).Should(Succeed(), "update to Setup") Eventually(func(g Gomega) bool { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - return workflow.Status.Ready && workflow.Status.State == dwsv1alpha2.StateSetup + return workflow.Status.Ready && workflow.Status.State == dwsv1alpha3.StateSetup }).Should(BeTrue(), "waiting for ready after setup") }) @@ -1342,13 +1342,13 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - workflow.Spec.DesiredState = dwsv1alpha2.StateSetup + workflow.Spec.DesiredState = dwsv1alpha3.StateSetup return k8sClient.Update(context.TODO(), workflow) }).Should(Succeed(), "update to Setup") Eventually(func(g Gomega) bool { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - return workflow.Status.State == dwsv1alpha2.StateSetup && workflow.Status.Status == dwsv1alpha2.StatusError + return workflow.Status.State == dwsv1alpha3.StateSetup && workflow.Status.Status == dwsv1alpha3.StatusError }).Should(BeTrue(), "waiting for setup state to fail") }) @@ -1476,7 +1476,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { buildWorkflowWithCorrectDirectives() 
Eventually(func(g Gomega) bool { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - return workflow.Status.Ready && workflow.Status.State == dwsv1alpha2.StateProposal + return workflow.Status.Ready && workflow.Status.State == dwsv1alpha3.StateProposal }).Should(BeTrue(), "reach desired Proposal state") Expect(verifyPinnedContainerProfile(context.TODO(), k8sClient, workflow, 2)).To(Succeed()) }, @@ -1492,7 +1492,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { buildWorkflowWithCorrectDirectives() Eventually(func(g Gomega) bool { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - return workflow.Status.Status == dwsv1alpha2.StatusError && strings.Contains(workflow.Status.Message, "container profile") && strings.Contains(workflow.Status.Message, "is restricted to") + return workflow.Status.Status == dwsv1alpha3.StatusError && strings.Contains(workflow.Status.Message, "container profile") && strings.Contains(workflow.Status.Message, "is restricted to") }).Should(BeTrue(), "does not reach desired Proposal state") }, Entry("when restricted to non-matching user ID", &altWorkflowUserID, nil), @@ -1521,7 +1521,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Eventually(func(g Gomega) bool { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - return workflow.Status.Ready && workflow.Status.State == dwsv1alpha2.StateProposal + return workflow.Status.Ready && workflow.Status.State == dwsv1alpha3.StateProposal }).Should(BeTrue(), "reach desired Proposal state") Expect(verifyPinnedContainerProfile(context.TODO(), k8sClient, workflow, 1)).To(Succeed()) }) @@ -1541,7 +1541,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Eventually(func(g Gomega) bool { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - return !workflow.Status.Ready && workflow.Status.Status == dwsv1alpha2.StatusError + return !workflow.Status.Ready && workflow.Status.Status == dwsv1alpha3.StatusError 
}).Should(BeTrue(), "be in error state") }) }) @@ -1566,7 +1566,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Eventually(func(g Gomega) bool { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - return !workflow.Status.Ready && workflow.Status.Status == dwsv1alpha2.StatusError + return !workflow.Status.Ready && workflow.Status.Status == dwsv1alpha3.StatusError }).Should(BeTrue(), "be in error state") }) }) @@ -1614,7 +1614,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { buildContainerWorkflowWithArgs(storageArgsList[argIdx]) Eventually(func(g Gomega) bool { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) - return workflow.Status.Status == dwsv1alpha2.StatusError && + return workflow.Status.Status == dwsv1alpha3.StatusError && strings.Contains(workflow.Status.Message, "not found in container profile") }).Should(BeTrue(), "does not reach desired Proposal state") }, @@ -1659,7 +1659,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Eventually(func(g Gomega) bool { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) if shouldError { - return workflow.Status.Status == dwsv1alpha2.StatusError && + return workflow.Status.Status == dwsv1alpha3.StatusError && strings.Contains(workflow.Status.Message, "unsupported container filesystem") } else { return workflow.Status.Ready == true @@ -1697,7 +1697,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Eventually(func(g Gomega) bool { g.Expect(k8sClient.Get(context.TODO(), key, workflow)).To(Succeed()) if shouldError { - return workflow.Status.Status == dwsv1alpha2.StatusError && + return workflow.Status.Status == dwsv1alpha3.StatusError && strings.Contains(workflow.Status.Message, "unsupported container filesystem: "+fsType) } else { return workflow.Status.Ready == true diff --git a/internal/controller/nnfcontainerprofile_helpers.go b/internal/controller/nnfcontainerprofile_helpers.go index 352b5efed..6b238457c 100644 --- 
a/internal/controller/nnfcontainerprofile_helpers.go +++ b/internal/controller/nnfcontainerprofile_helpers.go @@ -30,26 +30,26 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" "github.com/DataWorkflowServices/dws/utils/dwdparse" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" "github.com/go-logr/logr" ) -func getContainerProfile(ctx context.Context, clnt client.Client, workflow *dwsv1alpha2.Workflow, index int) (*nnfv1alpha3.NnfContainerProfile, error) { +func getContainerProfile(ctx context.Context, clnt client.Client, workflow *dwsv1alpha3.Workflow, index int) (*nnfv1alpha3.NnfContainerProfile, error) { profile, err := findPinnedContainerProfile(ctx, clnt, workflow, index) if err != nil { return nil, err } if profile == nil { - return nil, dwsv1alpha2.NewResourceError("container profile '%s' not found", indexedResourceName(workflow, index)).WithFatal() + return nil, dwsv1alpha3.NewResourceError("container profile '%s' not found", indexedResourceName(workflow, index)).WithFatal() } return profile, nil } -func findPinnedContainerProfile(ctx context.Context, clnt client.Client, workflow *dwsv1alpha2.Workflow, index int) (*nnfv1alpha3.NnfContainerProfile, error) { +func findPinnedContainerProfile(ctx context.Context, clnt client.Client, workflow *dwsv1alpha3.Workflow, index int) (*nnfv1alpha3.NnfContainerProfile, error) { profile := &nnfv1alpha3.NnfContainerProfile{ ObjectMeta: metav1.ObjectMeta{ Name: indexedResourceName(workflow, index), @@ -62,13 +62,13 @@ func findPinnedContainerProfile(ctx context.Context, clnt client.Client, workflo } if !profile.Data.Pinned { - return nil, dwsv1alpha2.NewResourceError("expected a pinned container profile '%s', but found one that is not pinned", indexedResourceName(workflow, index)).WithFatal() + return nil, 
dwsv1alpha3.NewResourceError("expected a pinned container profile '%s', but found one that is not pinned", indexedResourceName(workflow, index)).WithFatal() } return profile, nil } -func findContainerProfile(ctx context.Context, clnt client.Client, workflow *dwsv1alpha2.Workflow, index int) (*nnfv1alpha3.NnfContainerProfile, error) { +func findContainerProfile(ctx context.Context, clnt client.Client, workflow *dwsv1alpha3.Workflow, index int) (*nnfv1alpha3.NnfContainerProfile, error) { args, err := dwdparse.BuildArgsMap(workflow.Spec.DWDirectives[index]) if err != nil { return nil, err @@ -91,23 +91,23 @@ func findContainerProfile(ctx context.Context, clnt client.Client, workflow *dws } if profile.Data.Pinned { - return nil, dwsv1alpha2.NewResourceError("expected container profile that is not pinned '%s', but found one that is pinned", indexedResourceName(workflow, index)).WithFatal() + return nil, dwsv1alpha3.NewResourceError("expected container profile that is not pinned '%s', but found one that is pinned", indexedResourceName(workflow, index)).WithFatal() } // Determine whether the profile is restricted to a UserID/GroupID. 
restrictedMsg := "container profile '%s' is restricted to %s %d" if profile.Data.UserID != nil && *profile.Data.UserID != workflow.Spec.UserID { - return nil, dwsv1alpha2.NewResourceError("").WithUserMessage(restrictedMsg, profile.Name, "UserID", *profile.Data.UserID).WithUser().WithFatal() + return nil, dwsv1alpha3.NewResourceError("").WithUserMessage(restrictedMsg, profile.Name, "UserID", *profile.Data.UserID).WithUser().WithFatal() } if profile.Data.GroupID != nil && *profile.Data.GroupID != workflow.Spec.GroupID { - return nil, dwsv1alpha2.NewResourceError("").WithUserMessage(restrictedMsg, profile.Name, "GroupID", *profile.Data.GroupID).WithUser().WithFatal() + return nil, dwsv1alpha3.NewResourceError("").WithUserMessage(restrictedMsg, profile.Name, "GroupID", *profile.Data.GroupID).WithUser().WithFatal() } return profile, nil } -func createPinnedContainerProfileIfNecessary(ctx context.Context, clnt client.Client, scheme *kruntime.Scheme, workflow *dwsv1alpha2.Workflow, index int, log logr.Logger) error { +func createPinnedContainerProfileIfNecessary(ctx context.Context, clnt client.Client, scheme *kruntime.Scheme, workflow *dwsv1alpha3.Workflow, index int, log logr.Logger) error { profile, err := findPinnedContainerProfile(ctx, clnt, workflow, index) if err != nil && !apierrors.IsNotFound(err) { return err @@ -132,7 +132,7 @@ func createPinnedContainerProfileIfNecessary(ctx context.Context, clnt client.Cl pinnedProfile.Data.Pinned = true - dwsv1alpha2.AddOwnerLabels(pinnedProfile, workflow) + dwsv1alpha3.AddOwnerLabels(pinnedProfile, workflow) if err := controllerutil.SetControllerReference(workflow, pinnedProfile, scheme); err != nil { log.Error(err, "failed to set controller reference on profile", "profile", pinnedProfile) diff --git a/internal/controller/nnfcontainerprofile_test.go b/internal/controller/nnfcontainerprofile_test.go index 4b2536be3..7fae498ba 100644 --- a/internal/controller/nnfcontainerprofile_test.go +++ 
b/internal/controller/nnfcontainerprofile_test.go @@ -30,7 +30,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" ) @@ -97,7 +97,7 @@ func createBasicNnfContainerProfile(storages []nnfv1alpha3.NnfContainerProfileSt return createNnfContainerProfile(containerProfile, true) } -func verifyPinnedContainerProfile(ctx context.Context, clnt client.Client, workflow *dwsv1alpha2.Workflow, index int) error { +func verifyPinnedContainerProfile(ctx context.Context, clnt client.Client, workflow *dwsv1alpha3.Workflow, index int) error { nnfContainerProfile, err := findPinnedContainerProfile(ctx, clnt, workflow, index) ExpectWithOffset(1, err).NotTo(HaveOccurred()) diff --git a/internal/controller/nnfdatamovementprofile_helpers.go b/internal/controller/nnfdatamovementprofile_helpers.go index a10b65ff6..79b6eb033 100644 --- a/internal/controller/nnfdatamovementprofile_helpers.go +++ b/internal/controller/nnfdatamovementprofile_helpers.go @@ -31,7 +31,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" ) @@ -60,18 +60,18 @@ func findDMProfileToUse(ctx context.Context, clnt client.Client, args map[string } // Require that there be one and only one default. 
if len(profilesFound) == 0 { - return nil, dwsv1alpha2.NewResourceError("").WithUserMessage("Unable to find a default NnfDataMovementProfile to use").WithFatal() + return nil, dwsv1alpha3.NewResourceError("").WithUserMessage("Unable to find a default NnfDataMovementProfile to use").WithFatal() } else if len(profilesFound) > 1 { - return nil, dwsv1alpha2.NewResourceError("").WithUserMessage("More than one default NnfDataMovementProfile found; unable to pick one: %v", profilesFound).WithFatal() + return nil, dwsv1alpha3.NewResourceError("").WithUserMessage("More than one default NnfDataMovementProfile found; unable to pick one: %v", profilesFound).WithFatal() } profileName = profilesFound[0] } if len(profileName) == 0 { - return nil, dwsv1alpha2.NewResourceError("").WithUserMessage("Unable to find an NnfDataMovementProfile name").WithUser().WithFatal() + return nil, dwsv1alpha3.NewResourceError("").WithUserMessage("Unable to find an NnfDataMovementProfile name").WithUser().WithFatal() } err := clnt.Get(ctx, types.NamespacedName{Namespace: profileNamespace, Name: profileName}, NnfDataMovementProfile) if err != nil { - return nil, dwsv1alpha2.NewResourceError("").WithUserMessage("Unable to find NnfDataMovementProfile: %s", profileName).WithUser().WithFatal() + return nil, dwsv1alpha3.NewResourceError("").WithUserMessage("Unable to find NnfDataMovementProfile: %s", profileName).WithUser().WithFatal() } return NnfDataMovementProfile, nil @@ -86,7 +86,7 @@ func findPinnedDMProfile(ctx context.Context, clnt client.Client, namespace stri return nil, err } if !NnfDataMovementProfile.Data.Pinned { - return nil, dwsv1alpha2.NewResourceError("Expected pinned NnfDataMovementProfile, but it was not pinned: %s", pinnedName).WithFatal() + return nil, dwsv1alpha3.NewResourceError("Expected pinned NnfDataMovementProfile, but it was not pinned: %s", pinnedName).WithFatal() } return NnfDataMovementProfile, nil } @@ -121,7 +121,7 @@ func createPinnedDMProfile(ctx context.Context, clnt 
client.Client, clntScheme * newProfile.Data.Default = false controllerutil.SetControllerReference(owner, newProfile, clntScheme) - dwsv1alpha2.AddOwnerLabels(newProfile, owner) + dwsv1alpha3.AddOwnerLabels(newProfile, owner) err = clnt.Create(ctx, newProfile) if err != nil { if !apierrors.IsAlreadyExists(err) { diff --git a/internal/controller/nnfstorageprofile_helpers.go b/internal/controller/nnfstorageprofile_helpers.go index 70ed33e12..59583e675 100644 --- a/internal/controller/nnfstorageprofile_helpers.go +++ b/internal/controller/nnfstorageprofile_helpers.go @@ -31,7 +31,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" ) @@ -60,18 +60,18 @@ func findProfileToUse(ctx context.Context, clnt client.Client, args map[string]s } // Require that there be one and only one default. 
if len(profilesFound) == 0 { - return nil, dwsv1alpha2.NewResourceError("").WithUserMessage("Unable to find a default NnfStorageProfile to use").WithFatal() + return nil, dwsv1alpha3.NewResourceError("").WithUserMessage("Unable to find a default NnfStorageProfile to use").WithFatal() } else if len(profilesFound) > 1 { - return nil, dwsv1alpha2.NewResourceError("").WithUserMessage("More than one default NnfStorageProfile found; unable to pick one: %v", profilesFound).WithFatal() + return nil, dwsv1alpha3.NewResourceError("").WithUserMessage("More than one default NnfStorageProfile found; unable to pick one: %v", profilesFound).WithFatal() } profileName = profilesFound[0] } if len(profileName) == 0 { - return nil, dwsv1alpha2.NewResourceError("").WithUserMessage("Unable to find an NnfStorageProfile name").WithUser().WithFatal() + return nil, dwsv1alpha3.NewResourceError("").WithUserMessage("Unable to find an NnfStorageProfile name").WithUser().WithFatal() } err := clnt.Get(ctx, types.NamespacedName{Namespace: profileNamespace, Name: profileName}, nnfStorageProfile) if err != nil { - return nil, dwsv1alpha2.NewResourceError("").WithUserMessage("Unable to find NnfStorageProfile: %s", profileName).WithUser().WithFatal() + return nil, dwsv1alpha3.NewResourceError("").WithUserMessage("Unable to find NnfStorageProfile: %s", profileName).WithUser().WithFatal() } return nnfStorageProfile, nil @@ -86,7 +86,7 @@ func findPinnedProfile(ctx context.Context, clnt client.Client, namespace string return nil, err } if !nnfStorageProfile.Data.Pinned { - return nil, dwsv1alpha2.NewResourceError("Expected pinned NnfStorageProfile, but it was not pinned: %s", pinnedName).WithFatal() + return nil, dwsv1alpha3.NewResourceError("Expected pinned NnfStorageProfile, but it was not pinned: %s", pinnedName).WithFatal() } return nnfStorageProfile, nil } @@ -121,7 +121,7 @@ func createPinnedProfile(ctx context.Context, clnt client.Client, clntScheme *ru newProfile.Data.Default = false 
controllerutil.SetControllerReference(owner, newProfile, clntScheme) - dwsv1alpha2.AddOwnerLabels(newProfile, owner) + dwsv1alpha3.AddOwnerLabels(newProfile, owner) err = clnt.Create(ctx, newProfile) if err != nil { if !apierrors.IsAlreadyExists(err) { @@ -150,16 +150,16 @@ func addPinnedStorageProfileLabel(object metav1.Object, nnfStorageProfile *nnfv1 func getPinnedStorageProfileFromLabel(ctx context.Context, clnt client.Client, object metav1.Object) (*nnfv1alpha3.NnfStorageProfile, error) { labels := object.GetLabels() if labels == nil { - return nil, dwsv1alpha2.NewResourceError("unable to find labels").WithFatal() + return nil, dwsv1alpha3.NewResourceError("unable to find labels").WithFatal() } pinnedName, okName := labels[nnfv1alpha3.PinnedStorageProfileLabelName] if !okName { - return nil, dwsv1alpha2.NewResourceError("unable to find %s label", nnfv1alpha3.PinnedStorageProfileLabelName).WithFatal() + return nil, dwsv1alpha3.NewResourceError("unable to find %s label", nnfv1alpha3.PinnedStorageProfileLabelName).WithFatal() } pinnedNamespace, okNamespace := labels[nnfv1alpha3.PinnedStorageProfileLabelNameSpace] if !okNamespace { - return nil, dwsv1alpha2.NewResourceError("unable to find %s label", nnfv1alpha3.PinnedStorageProfileLabelNameSpace).WithFatal() + return nil, dwsv1alpha3.NewResourceError("unable to find %s label", nnfv1alpha3.PinnedStorageProfileLabelNameSpace).WithFatal() } return findPinnedProfile(ctx, clnt, pinnedNamespace, pinnedName) diff --git a/internal/controller/nnfsystemstorage_controller.go b/internal/controller/nnfsystemstorage_controller.go index a95517dba..5d4946afe 100644 --- a/internal/controller/nnfsystemstorage_controller.go +++ b/internal/controller/nnfsystemstorage_controller.go @@ -37,7 +37,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" 
"github.com/DataWorkflowServices/dws/utils/updater" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" @@ -48,7 +48,7 @@ type NnfSystemStorageReconciler struct { client.Client Log logr.Logger Scheme *kruntime.Scheme - ChildObjects []dwsv1alpha2.ObjectList + ChildObjects []dwsv1alpha3.ObjectList } const ( @@ -88,7 +88,7 @@ func (r *NnfSystemStorageReconciler) Reconcile(ctx context.Context, req ctrl.Req return ctrl.Result{}, nil } - deleteStatus, err := dwsv1alpha2.DeleteChildren(ctx, r.Client, r.ChildObjects, nnfSystemStorage) + deleteStatus, err := dwsv1alpha3.DeleteChildren(ctx, r.Client, r.ChildObjects, nnfSystemStorage) if err != nil { return ctrl.Result{}, err } @@ -164,12 +164,12 @@ func (r *NnfSystemStorageReconciler) Reconcile(ctx context.Context, req ctrl.Req // Get the SystemConfiguration. If a SystemConfiguration is specified in the NnfSystemStorage, use that. // Otherwise, use the default/default SystemConfiguration. 
-func (r *NnfSystemStorageReconciler) getSystemConfiguration(ctx context.Context, nnfSystemStorage *nnfv1alpha3.NnfSystemStorage) (*dwsv1alpha2.SystemConfiguration, error) { - systemConfiguration := &dwsv1alpha2.SystemConfiguration{} +func (r *NnfSystemStorageReconciler) getSystemConfiguration(ctx context.Context, nnfSystemStorage *nnfv1alpha3.NnfSystemStorage) (*dwsv1alpha3.SystemConfiguration, error) { + systemConfiguration := &dwsv1alpha3.SystemConfiguration{} if nnfSystemStorage.Spec.SystemConfiguration != (corev1.ObjectReference{}) { - if nnfSystemStorage.Spec.SystemConfiguration.Kind != reflect.TypeOf(dwsv1alpha2.SystemConfiguration{}).Name() { - return nil, dwsv1alpha2.NewResourceError("system configuration is not of kind '%s'", reflect.TypeOf(dwsv1alpha2.SystemConfiguration{}).Name()).WithFatal() + if nnfSystemStorage.Spec.SystemConfiguration.Kind != reflect.TypeOf(dwsv1alpha3.SystemConfiguration{}).Name() { + return nil, dwsv1alpha3.NewResourceError("system configuration is not of kind '%s'", reflect.TypeOf(dwsv1alpha3.SystemConfiguration{}).Name()).WithFatal() } systemConfiguration.ObjectMeta = metav1.ObjectMeta{ @@ -184,7 +184,7 @@ func (r *NnfSystemStorageReconciler) getSystemConfiguration(ctx context.Context, } if err := r.Get(ctx, client.ObjectKeyFromObject(systemConfiguration), systemConfiguration); err != nil { - return nil, dwsv1alpha2.NewResourceError("could not get systemconfiguration '%v'", client.ObjectKeyFromObject(systemConfiguration)).WithError(err) + return nil, dwsv1alpha3.NewResourceError("could not get systemconfiguration '%v'", client.ObjectKeyFromObject(systemConfiguration)).WithError(err) } return systemConfiguration, nil @@ -194,11 +194,11 @@ func (r *NnfSystemStorageReconciler) getSystemConfiguration(ctx context.Context, // specified in the NnfSystemStorage spec, and it must be marked as pinned. 
func (r *NnfSystemStorageReconciler) getStorageProfile(ctx context.Context, nnfSystemStorage *nnfv1alpha3.NnfSystemStorage) (*nnfv1alpha3.NnfStorageProfile, error) { if nnfSystemStorage.Spec.StorageProfile == (corev1.ObjectReference{}) { - return nil, dwsv1alpha2.NewResourceError("StorageProfile must be specified").WithFatal() + return nil, dwsv1alpha3.NewResourceError("StorageProfile must be specified").WithFatal() } if nnfSystemStorage.Spec.StorageProfile.Kind != reflect.TypeOf(nnfv1alpha3.NnfStorageProfile{}).Name() { - return nil, dwsv1alpha2.NewResourceError("StorageProfile is not of kind '%s'", reflect.TypeOf(nnfv1alpha3.NnfStorageProfile{}).Name()).WithFatal() + return nil, dwsv1alpha3.NewResourceError("StorageProfile is not of kind '%s'", reflect.TypeOf(nnfv1alpha3.NnfStorageProfile{}).Name()).WithFatal() } storageProfile := &nnfv1alpha3.NnfStorageProfile{ @@ -209,7 +209,7 @@ func (r *NnfSystemStorageReconciler) getStorageProfile(ctx context.Context, nnfS } if err := r.Get(ctx, client.ObjectKeyFromObject(storageProfile), storageProfile); err != nil { - return nil, dwsv1alpha2.NewResourceError("could not get StorageProfile '%v'", client.ObjectKeyFromObject(storageProfile)).WithError(err) + return nil, dwsv1alpha3.NewResourceError("could not get StorageProfile '%v'", client.ObjectKeyFromObject(storageProfile)).WithError(err) } return storageProfile, nil @@ -226,7 +226,7 @@ func (r *NnfSystemStorageReconciler) createServers(ctx context.Context, nnfSyste if len(nnfSystemStorage.Spec.IncludeRabbits) != 0 { if len(nnfSystemStorage.Spec.ExcludeRabbits) != 0 { - return dwsv1alpha2.NewResourceError("IncludeRabbits and ExcludeRabbits can not be used together").WithFatal() + return dwsv1alpha3.NewResourceError("IncludeRabbits and ExcludeRabbits can not be used together").WithFatal() } rabbitList = append([]string(nil), nnfSystemStorage.Spec.IncludeRabbits...) 
@@ -262,7 +262,7 @@ func (r *NnfSystemStorageReconciler) createServers(ctx context.Context, nnfSyste if nnfSystemStorage.Spec.ExcludeDisabledRabbits { tempRabbitList := rabbitList[:0] for _, rabbit := range rabbitList { - storage := &dwsv1alpha2.Storage{ + storage := &dwsv1alpha3.Storage{ ObjectMeta: metav1.ObjectMeta{ Name: rabbit, Namespace: corev1.NamespaceDefault, @@ -270,7 +270,7 @@ func (r *NnfSystemStorageReconciler) createServers(ctx context.Context, nnfSyste } if err := r.Get(ctx, client.ObjectKeyFromObject(storage), storage); err != nil { - return dwsv1alpha2.NewResourceError("could not get Storage '%v'", client.ObjectKeyFromObject(storage)).WithError(err) + return dwsv1alpha3.NewResourceError("could not get Storage '%v'", client.ObjectKeyFromObject(storage)).WithError(err) } labels := storage.GetLabels() @@ -278,11 +278,11 @@ func (r *NnfSystemStorageReconciler) createServers(ctx context.Context, nnfSyste continue } - if storageType := labels[dwsv1alpha2.StorageTypeLabel]; storageType != "Rabbit" { + if storageType := labels[dwsv1alpha3.StorageTypeLabel]; storageType != "Rabbit" { continue } - if storage.Spec.State == dwsv1alpha2.DisabledState || storage.Status.Status != dwsv1alpha2.ReadyStatus { + if storage.Spec.State == dwsv1alpha3.DisabledState || storage.Status.Status != dwsv1alpha3.ReadyStatus { continue } @@ -292,7 +292,7 @@ func (r *NnfSystemStorageReconciler) createServers(ctx context.Context, nnfSyste } // Use the Rabbit list to fill in the servers resource with one allocation per Rabbit - servers := &dwsv1alpha2.Servers{ + servers := &dwsv1alpha3.Servers{ ObjectMeta: metav1.ObjectMeta{ Name: nnfSystemStorage.GetName(), Namespace: nnfSystemStorage.GetNamespace(), @@ -301,25 +301,25 @@ func (r *NnfSystemStorageReconciler) createServers(ctx context.Context, nnfSyste result, err := ctrl.CreateOrUpdate(ctx, r.Client, servers, func() error { - dwsv1alpha2.AddOwnerLabels(servers, nnfSystemStorage) + dwsv1alpha3.AddOwnerLabels(servers, 
nnfSystemStorage) addDirectiveIndexLabel(servers, 0) - servers.Spec.AllocationSets = []dwsv1alpha2.ServersSpecAllocationSet{{ + servers.Spec.AllocationSets = []dwsv1alpha3.ServersSpecAllocationSet{{ Label: "system-storage", AllocationSize: nnfSystemStorage.Spec.Capacity, }} - servers.Spec.AllocationSets[0].Storage = []dwsv1alpha2.ServersSpecStorage{} + servers.Spec.AllocationSets[0].Storage = []dwsv1alpha3.ServersSpecStorage{} for _, rabbitName := range rabbitList { - servers.Spec.AllocationSets[0].Storage = append(servers.Spec.AllocationSets[0].Storage, dwsv1alpha2.ServersSpecStorage{Name: rabbitName, AllocationCount: 1}) + servers.Spec.AllocationSets[0].Storage = append(servers.Spec.AllocationSets[0].Storage, dwsv1alpha3.ServersSpecStorage{Name: rabbitName, AllocationCount: 1}) } return ctrl.SetControllerReference(nnfSystemStorage, servers, r.Scheme) }) if err != nil { - return dwsv1alpha2.NewResourceError("CreateOrUpdate failed for servers: %v", client.ObjectKeyFromObject(servers)).WithError(err) + return dwsv1alpha3.NewResourceError("CreateOrUpdate failed for servers: %v", client.ObjectKeyFromObject(servers)).WithError(err) } if result == controllerutil.OperationResultCreated { @@ -346,7 +346,7 @@ func (r *NnfSystemStorageReconciler) createComputes(ctx context.Context, nnfSyst if len(nnfSystemStorage.Spec.IncludeComputes) != 0 { if len(nnfSystemStorage.Spec.ExcludeComputes) != 0 { - return dwsv1alpha2.NewResourceError("IncludeComputes and ExcludeComputes can not be used together").WithFatal() + return dwsv1alpha3.NewResourceError("IncludeComputes and ExcludeComputes can not be used together").WithFatal() } computeList = append([]string(nil), nnfSystemStorage.Spec.IncludeComputes...) 
@@ -356,14 +356,14 @@ func (r *NnfSystemStorageReconciler) createComputes(ctx context.Context, nnfSyst return err } - servers := &dwsv1alpha2.Servers{ + servers := &dwsv1alpha3.Servers{ ObjectMeta: metav1.ObjectMeta{ Name: nnfSystemStorage.GetName(), Namespace: nnfSystemStorage.GetNamespace(), }, } if err := r.Get(ctx, client.ObjectKeyFromObject(servers), servers); err != nil { - return dwsv1alpha2.NewResourceError("could not get Servers: %v", client.ObjectKeyFromObject(servers)).WithError(err) + return dwsv1alpha3.NewResourceError("could not get Servers: %v", client.ObjectKeyFromObject(servers)).WithError(err) } // Create a map of the Rabbit node names so it's easy to search @@ -384,7 +384,7 @@ func (r *NnfSystemStorageReconciler) createComputes(ctx context.Context, nnfSyst case nnfv1alpha3.ComputesTargetPattern: indexList = append([]int(nil), nnfSystemStorage.Spec.ComputesPattern...) default: - return dwsv1alpha2.NewResourceError("undexpected ComputesTarget type '%s'", nnfSystemStorage.Spec.ComputesTarget).WithFatal() + return dwsv1alpha3.NewResourceError("unexpected ComputesTarget type '%s'", nnfSystemStorage.Spec.ComputesTarget).WithFatal() } indexMap := map[int]bool{} @@ -423,7 +423,7 @@ func (r *NnfSystemStorageReconciler) createComputes(ctx context.Context, nnfSyst } // Create a computes resource using the list of compute nodes. 
- computes := &dwsv1alpha2.Computes{ + computes := &dwsv1alpha3.Computes{ ObjectMeta: metav1.ObjectMeta{ Name: nnfSystemStorage.GetName(), Namespace: nnfSystemStorage.GetNamespace(), @@ -432,20 +432,20 @@ func (r *NnfSystemStorageReconciler) createComputes(ctx context.Context, nnfSyst result, err := ctrl.CreateOrUpdate(ctx, r.Client, computes, func() error { - dwsv1alpha2.AddOwnerLabels(computes, nnfSystemStorage) + dwsv1alpha3.AddOwnerLabels(computes, nnfSystemStorage) addDirectiveIndexLabel(computes, 0) - computes.Data = []dwsv1alpha2.ComputesData{} + computes.Data = []dwsv1alpha3.ComputesData{} for _, computeName := range computeList { - computes.Data = append(computes.Data, dwsv1alpha2.ComputesData{Name: computeName}) + computes.Data = append(computes.Data, dwsv1alpha3.ComputesData{Name: computeName}) } return ctrl.SetControllerReference(nnfSystemStorage, computes, r.Scheme) }) if err != nil { - return dwsv1alpha2.NewResourceError("CreateOrUpdate failed for computes: %v", client.ObjectKeyFromObject(computes)).WithError(err) + return dwsv1alpha3.NewResourceError("CreateOrUpdate failed for computes: %v", client.ObjectKeyFromObject(computes)).WithError(err) } if result == controllerutil.OperationResultCreated { @@ -468,7 +468,7 @@ func (r *NnfSystemStorageReconciler) createNnfStorage(ctx context.Context, nnfSy return err } - servers := &dwsv1alpha2.Servers{ + servers := &dwsv1alpha3.Servers{ ObjectMeta: metav1.ObjectMeta{ Name: nnfSystemStorage.GetName(), Namespace: nnfSystemStorage.GetNamespace(), @@ -476,7 +476,7 @@ func (r *NnfSystemStorageReconciler) createNnfStorage(ctx context.Context, nnfSy } if err := r.Get(ctx, client.ObjectKeyFromObject(servers), servers); err != nil { - return dwsv1alpha2.NewResourceError("could not get Servers: %v", client.ObjectKeyFromObject(servers)).WithError(err) + return dwsv1alpha3.NewResourceError("could not get Servers: %v", client.ObjectKeyFromObject(servers)).WithError(err) } nnfStorage := &nnfv1alpha3.NnfStorage{ @@ -488,7 
+488,7 @@ func (r *NnfSystemStorageReconciler) createNnfStorage(ctx context.Context, nnfSy result, err := ctrl.CreateOrUpdate(ctx, r.Client, nnfStorage, func() error { - dwsv1alpha2.AddOwnerLabels(nnfStorage, nnfSystemStorage) + dwsv1alpha3.AddOwnerLabels(nnfStorage, nnfSystemStorage) addDirectiveIndexLabel(nnfStorage, 0) addPinnedStorageProfileLabel(nnfStorage, storageProfile) @@ -520,7 +520,7 @@ func (r *NnfSystemStorageReconciler) createNnfStorage(ctx context.Context, nnfSy }) if err != nil { - return dwsv1alpha2.NewResourceError("CreateOrUpdate failed for NnfStorage: %v", client.ObjectKeyFromObject(nnfStorage)).WithError(err) + return dwsv1alpha3.NewResourceError("CreateOrUpdate failed for NnfStorage: %v", client.ObjectKeyFromObject(nnfStorage)).WithError(err) } if result == controllerutil.OperationResultCreated { @@ -544,7 +544,7 @@ func (r *NnfSystemStorageReconciler) waitForNnfStorage(ctx context.Context, nnfS }, } if err := r.Get(ctx, client.ObjectKeyFromObject(nnfStorage), nnfStorage); err != nil { - return true, dwsv1alpha2.NewResourceError("could not get NnfStorage: %v", client.ObjectKeyFromObject(nnfStorage)).WithError(err) + return true, dwsv1alpha3.NewResourceError("could not get NnfStorage: %v", client.ObjectKeyFromObject(nnfStorage)).WithError(err) } // If the Status section has not been filled in yet, exit and wait. 
@@ -553,7 +553,7 @@ func (r *NnfSystemStorageReconciler) waitForNnfStorage(ctx context.Context, nnfS } if nnfStorage.Status.Error != nil { - return true, dwsv1alpha2.NewResourceError("storage resource error: %v", client.ObjectKeyFromObject(nnfStorage)).WithError(nnfStorage.Status.Error) + return true, dwsv1alpha3.NewResourceError("storage resource error: %v", client.ObjectKeyFromObject(nnfStorage)).WithError(nnfStorage.Status.Error) } if !nnfStorage.Status.Ready { @@ -584,11 +584,11 @@ func (r *NnfSystemStorageReconciler) createNnfAccess(ctx context.Context, nnfSys // Create an NNFAccess for the compute clients result, err := ctrl.CreateOrUpdate(ctx, r.Client, nnfAccess, func() error { - dwsv1alpha2.AddOwnerLabels(nnfAccess, nnfSystemStorage) + dwsv1alpha3.AddOwnerLabels(nnfAccess, nnfSystemStorage) addPinnedStorageProfileLabel(nnfAccess, storageProfile) addDirectiveIndexLabel(nnfAccess, 0) - nnfAccess.Spec.TeardownState = dwsv1alpha2.StatePostRun + nnfAccess.Spec.TeardownState = dwsv1alpha3.StatePostRun nnfAccess.Spec.DesiredState = "mounted" nnfAccess.Spec.UserID = 0 nnfAccess.Spec.GroupID = 0 @@ -598,7 +598,7 @@ func (r *NnfSystemStorageReconciler) createNnfAccess(ctx context.Context, nnfSys nnfAccess.Spec.ClientReference = corev1.ObjectReference{ Name: nnfSystemStorage.GetName(), Namespace: nnfSystemStorage.GetNamespace(), - Kind: reflect.TypeOf(dwsv1alpha2.Computes{}).Name(), + Kind: reflect.TypeOf(dwsv1alpha3.Computes{}).Name(), } nnfAccess.Spec.StorageReference = corev1.ObjectReference{ @@ -610,7 +610,7 @@ func (r *NnfSystemStorageReconciler) createNnfAccess(ctx context.Context, nnfSys return ctrl.SetControllerReference(nnfSystemStorage, nnfAccess, r.Scheme) }) if err != nil { - return dwsv1alpha2.NewResourceError("Could not CreateOrUpdate compute node NnfAccess: %v", client.ObjectKeyFromObject(nnfAccess)).WithError(err) + return dwsv1alpha3.NewResourceError("Could not CreateOrUpdate compute node NnfAccess: %v", 
client.ObjectKeyFromObject(nnfAccess)).WithError(err) } if result == controllerutil.OperationResultCreated { @@ -634,11 +634,11 @@ func (r *NnfSystemStorageReconciler) waitForNnfAccess(ctx context.Context, nnfSy } if err := r.Get(ctx, client.ObjectKeyFromObject(nnfAccess), nnfAccess); err != nil { - return true, dwsv1alpha2.NewResourceError("could not get NnfAccess: %v", client.ObjectKeyFromObject(nnfAccess)).WithError(err) + return true, dwsv1alpha3.NewResourceError("could not get NnfAccess: %v", client.ObjectKeyFromObject(nnfAccess)).WithError(err) } if nnfAccess.Status.Error != nil { - return true, dwsv1alpha2.NewResourceError("NnfAccess resource error: %v", client.ObjectKeyFromObject(nnfAccess)).WithError(nnfAccess.Status.Error) + return true, dwsv1alpha3.NewResourceError("NnfAccess resource error: %v", client.ObjectKeyFromObject(nnfAccess)).WithError(nnfAccess.Status.Error) } if nnfAccess.Status.State != nnfAccess.Spec.DesiredState { @@ -674,19 +674,19 @@ func (r *NnfSystemStorageReconciler) NnfSystemStorageEnqueueAll(ctx context.Cont // SetupWithManager sets up the controller with the Manager. func (r *NnfSystemStorageReconciler) SetupWithManager(mgr ctrl.Manager) error { - r.ChildObjects = []dwsv1alpha2.ObjectList{ + r.ChildObjects = []dwsv1alpha3.ObjectList{ &nnfv1alpha3.NnfAccessList{}, &nnfv1alpha3.NnfStorageList{}, - &dwsv1alpha2.ComputesList{}, - &dwsv1alpha2.ServersList{}, + &dwsv1alpha3.ComputesList{}, + &dwsv1alpha3.ServersList{}, } return ctrl.NewControllerManagedBy(mgr). For(&nnfv1alpha3.NnfSystemStorage{}). - Owns(&dwsv1alpha2.Computes{}). - Owns(&dwsv1alpha2.Servers{}). + Owns(&dwsv1alpha3.Computes{}). + Owns(&dwsv1alpha3.Servers{}). Owns(&nnfv1alpha3.NnfStorage{}). Owns(&nnfv1alpha3.NnfAccess{}). - Watches(&dwsv1alpha2.Storage{}, handler.EnqueueRequestsFromMapFunc(r.NnfSystemStorageEnqueueAll)). + Watches(&dwsv1alpha3.Storage{}, handler.EnqueueRequestsFromMapFunc(r.NnfSystemStorageEnqueueAll)). 
Complete(r) } diff --git a/internal/controller/nnfsystemstorage_controller_test.go b/internal/controller/nnfsystemstorage_controller_test.go index df370eb5b..9ec5aebb2 100644 --- a/internal/controller/nnfsystemstorage_controller_test.go +++ b/internal/controller/nnfsystemstorage_controller_test.go @@ -32,7 +32,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" ) @@ -45,7 +45,7 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { nnfNodes := [2]*nnfv1alpha3.NnfNode{} nodes := [2]*corev1.Node{} - var systemConfiguration *dwsv1alpha2.SystemConfiguration + var systemConfiguration *dwsv1alpha3.SystemConfiguration var storageProfile *nnfv1alpha3.NnfStorageProfile var setup sync.Once @@ -57,18 +57,18 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { } }) - systemConfiguration = &dwsv1alpha2.SystemConfiguration{ + systemConfiguration = &dwsv1alpha3.SystemConfiguration{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ Name: "default", Namespace: corev1.NamespaceDefault, }, - Spec: dwsv1alpha2.SystemConfigurationSpec{ - StorageNodes: []dwsv1alpha2.SystemConfigurationStorageNode{ + Spec: dwsv1alpha3.SystemConfigurationSpec{ + StorageNodes: []dwsv1alpha3.SystemConfigurationStorageNode{ { Type: "Rabbit", Name: nodeNames[0], - ComputesAccess: []dwsv1alpha2.SystemConfigurationComputeNodeReference{ + ComputesAccess: []dwsv1alpha3.SystemConfigurationComputeNodeReference{ { Name: "0-0", Index: 0, @@ -138,7 +138,7 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { { Type: "Rabbit", Name: nodeNames[1], - ComputesAccess: []dwsv1alpha2.SystemConfigurationComputeNodeReference{ + ComputesAccess: []dwsv1alpha3.SystemConfigurationComputeNodeReference{ { Name: "1-0", Index: 0, @@ -249,7 +249,7 @@ var _ 
= Describe("NnfSystemStorage Controller Test", func() { return k8sClient.Update(context.TODO(), nnfNodes[i]) }).Should(Succeed(), "set LNet Nid in NnfNode") - storage := &dwsv1alpha2.Storage{ + storage := &dwsv1alpha3.Storage{ ObjectMeta: metav1.ObjectMeta{ Name: nodeName, Namespace: corev1.NamespaceDefault, @@ -287,7 +287,7 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { } Expect(k8sClient.Delete(context.TODO(), systemConfiguration)).To(Succeed()) - tempConfig := &dwsv1alpha2.SystemConfiguration{} + tempConfig := &dwsv1alpha3.SystemConfiguration{} Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(systemConfiguration), tempConfig) }).ShouldNot(Succeed()) @@ -320,7 +320,7 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { return nnfSystemStorage.Status.Ready }).Should(BeTrue()) - servers := &dwsv1alpha2.Servers{ + servers := &dwsv1alpha3.Servers{ ObjectMeta: metav1.ObjectMeta{ Name: nnfSystemStorage.GetName(), Namespace: nnfSystemStorage.GetNamespace(), @@ -333,7 +333,7 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { Expect(servers.Spec.AllocationSets).To(HaveLen(1)) Expect(servers.Spec.AllocationSets[0].Storage).To(HaveLen(2)) - computes := &dwsv1alpha2.Computes{ + computes := &dwsv1alpha3.Computes{ ObjectMeta: metav1.ObjectMeta{ Name: nnfSystemStorage.GetName(), Namespace: nnfSystemStorage.GetNamespace(), @@ -379,7 +379,7 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { return nnfSystemStorage.Status.Ready }).Should(BeTrue()) - servers := &dwsv1alpha2.Servers{ + servers := &dwsv1alpha3.Servers{ ObjectMeta: metav1.ObjectMeta{ Name: nnfSystemStorage.GetName(), Namespace: nnfSystemStorage.GetNamespace(), @@ -392,7 +392,7 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { Expect(servers.Spec.AllocationSets).To(HaveLen(1)) 
Expect(servers.Spec.AllocationSets[0].Storage).To(HaveLen(2)) - computes := &dwsv1alpha2.Computes{ + computes := &dwsv1alpha3.Computes{ ObjectMeta: metav1.ObjectMeta{ Name: nnfSystemStorage.GetName(), Namespace: nnfSystemStorage.GetNamespace(), @@ -439,7 +439,7 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { return nnfSystemStorage.Status.Ready }).Should(BeTrue()) - servers := &dwsv1alpha2.Servers{ + servers := &dwsv1alpha3.Servers{ ObjectMeta: metav1.ObjectMeta{ Name: nnfSystemStorage.GetName(), Namespace: nnfSystemStorage.GetNamespace(), @@ -452,7 +452,7 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { Expect(servers.Spec.AllocationSets).To(HaveLen(1)) Expect(servers.Spec.AllocationSets[0].Storage).To(HaveLen(2)) - computes := &dwsv1alpha2.Computes{ + computes := &dwsv1alpha3.Computes{ ObjectMeta: metav1.ObjectMeta{ Name: nnfSystemStorage.GetName(), Namespace: nnfSystemStorage.GetNamespace(), @@ -500,7 +500,7 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { return nnfSystemStorage.Status.Ready }).Should(BeTrue()) - servers := &dwsv1alpha2.Servers{ + servers := &dwsv1alpha3.Servers{ ObjectMeta: metav1.ObjectMeta{ Name: nnfSystemStorage.GetName(), Namespace: nnfSystemStorage.GetNamespace(), @@ -517,7 +517,7 @@ var _ = Describe("NnfSystemStorage Controller Test", func() { "AllocationCount": Equal(1), })) - computes := &dwsv1alpha2.Computes{ + computes := &dwsv1alpha3.Computes{ ObjectMeta: metav1.ObjectMeta{ Name: nnfSystemStorage.GetName(), Namespace: nnfSystemStorage.GetNamespace(), diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 3fd884ab1..bc1f71a38 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -46,7 +46,7 @@ import ( "github.com/ghodss/yaml" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" lusv1beta1 
"github.com/NearNodeFlash/lustre-fs-operator/api/v1beta1" nnf "github.com/NearNodeFlash/nnf-ec/pkg" @@ -95,8 +95,8 @@ var envVars = []envSetting{ {"NNF_TEST_ENVIRONMENT", "true"}, } -func loadNNFDWDirectiveRuleset(filename string) (dwsv1alpha2.DWDirectiveRule, error) { - ruleset := dwsv1alpha2.DWDirectiveRule{} +func loadNNFDWDirectiveRuleset(filename string) (dwsv1alpha3.DWDirectiveRule, error) { + ruleset := dwsv1alpha3.DWDirectiveRule{} bytes, err := ioutil.ReadFile(filename) if err != nil { @@ -136,7 +136,7 @@ var _ = BeforeSuite(func() { // before calling envtest.Start(). // Then add the scheme to envtest.CRDInstallOptions. - err = dwsv1alpha2.AddToScheme(scheme.Scheme) + err = dwsv1alpha3.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) err = lusv1beta1.AddToScheme(scheme.Scheme) @@ -209,7 +209,7 @@ var _ = BeforeSuite(func() { Start webhooks */ - err = (&dwsv1alpha2.Workflow{}).SetupWebhookWithManager(k8sManager) + err = (&dwsv1alpha3.Workflow{}).SetupWebhookWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) err = (&lusv1beta1.LustreFileSystem{}).SetupWebhookWithManager(k8sManager) diff --git a/mount-daemon/main.go b/mount-daemon/main.go index 9d383b2f1..7cd3d1908 100644 --- a/mount-daemon/main.go +++ b/mount-daemon/main.go @@ -45,7 +45,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/log/zap" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" nnfv1alpha3 "github.com/NearNodeFlash/nnf-sos/api/v1alpha3" controllers "github.com/NearNodeFlash/nnf-sos/internal/controller" "github.com/NearNodeFlash/nnf-sos/mount-daemon/version" @@ -68,7 +68,7 @@ type Service struct { func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - utilruntime.Must(dwsv1alpha2.AddToScheme(scheme)) + utilruntime.Must(dwsv1alpha3.AddToScheme(scheme)) utilruntime.Must(nnfv1alpha3.AddToScheme(scheme)) //+kubebuilder:scaffold:scheme } diff 
--git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/clientmount_types.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/clientmount_types.go index dd202a626..2a251ce2f 100644 --- a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/clientmount_types.go +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/clientmount_types.go @@ -1,5 +1,5 @@ /* - * Copyright 2021-2023 Hewlett Packard Enterprise Development LP + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -190,7 +190,6 @@ type ClientMountStatus struct { } //+kubebuilder:object:root=true -//+kubebuilder:storageversion //+kubebuilder:subresource:status //+kubebuilder:printcolumn:name="DESIREDSTATE",type="string",JSONPath=".spec.desiredState",description="The desired state" //+kubebuilder:printcolumn:name="READY",type="boolean",JSONPath=".status.allReady",description="True if desired state is achieved" diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/computes_types.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/computes_types.go index e945a7c26..7b647db9c 100644 --- a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/computes_types.go +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/computes_types.go @@ -1,5 +1,5 @@ /* - * Copyright 2021, 2022 Hewlett Packard Enterprise Development LP + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. 
* * The entirety of this work is licensed under the Apache License, @@ -31,7 +31,6 @@ type ComputesData struct { } //+kubebuilder:object:root=true -//+kubebuilder:storageversion // Computes is the Schema for the computes API type Computes struct { diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/conversion.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/conversion.go index 0f99d224c..74eb9f4ff 100644 --- a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/conversion.go +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/conversion.go @@ -1,5 +1,5 @@ /* - * Copyright 2023 Hewlett Packard Enterprise Development LP + * Copyright 2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -19,23 +19,410 @@ package v1alpha2 -func (*ClientMount) Hub() {} -func (*Computes) Hub() {} -func (*DWDirectiveRule) Hub() {} -func (*DirectiveBreakdown) Hub() {} -func (*PersistentStorageInstance) Hub() {} -func (*Servers) Hub() {} -func (*Storage) Hub() {} -func (*SystemConfiguration) Hub() {} -func (*Workflow) Hub() {} - -// The conversion-verifier tool wants these...though they're never used. 
-func (*ClientMountList) Hub() {} -func (*ComputesList) Hub() {} -func (*DWDirectiveRuleList) Hub() {} -func (*DirectiveBreakdownList) Hub() {} -func (*PersistentStorageInstanceList) Hub() {} -func (*ServersList) Hub() {} -func (*StorageList) Hub() {} -func (*SystemConfigurationList) Hub() {} -func (*WorkflowList) Hub() {} +import ( + apierrors "k8s.io/apimachinery/pkg/api/errors" + apiconversion "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/conversion" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" + utilconversion "github.com/DataWorkflowServices/dws/github/cluster-api/util/conversion" +) + +var convertlog = logf.Log.V(2).WithName("convert-v1alpha2") + +func (src *ClientMount) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert ClientMount To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*dwsv1alpha3.ClientMount) + + if err := Convert_v1alpha2_ClientMount_To_v1alpha3_ClientMount(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &dwsv1alpha3.ClientMount{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. + + return nil +} + +func (dst *ClientMount) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*dwsv1alpha3.ClientMount) + convertlog.Info("Convert ClientMount From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha3_ClientMount_To_v1alpha2_ClientMount(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. 
+ return utilconversion.MarshalData(src, dst) +} + +func (src *Computes) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert Computes To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*dwsv1alpha3.Computes) + + if err := Convert_v1alpha2_Computes_To_v1alpha3_Computes(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &dwsv1alpha3.Computes{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. + + return nil +} + +func (dst *Computes) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*dwsv1alpha3.Computes) + convertlog.Info("Convert Computes From Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + + if err := Convert_v1alpha3_Computes_To_v1alpha2_Computes(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata. + return utilconversion.MarshalData(src, dst) +} + +func (src *DWDirectiveRule) ConvertTo(dstRaw conversion.Hub) error { + convertlog.Info("Convert DWDirectiveRule To Hub", "name", src.GetName(), "namespace", src.GetNamespace()) + dst := dstRaw.(*dwsv1alpha3.DWDirectiveRule) + + if err := Convert_v1alpha2_DWDirectiveRule_To_v1alpha3_DWDirectiveRule(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &dwsv1alpha3.DWDirectiveRule{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + // EDIT THIS FUNCTION! If the annotation is holding anything that is + // hub-specific then copy it into 'dst' from 'restored'. + // Otherwise, you may comment out UnmarshalData() until it's needed. 
+
+	return nil
+}
+
+func (dst *DWDirectiveRule) ConvertFrom(srcRaw conversion.Hub) error {
+	src := srcRaw.(*dwsv1alpha3.DWDirectiveRule)
+	convertlog.Info("Convert DWDirectiveRule From Hub", "name", src.GetName(), "namespace", src.GetNamespace())
+
+	if err := Convert_v1alpha3_DWDirectiveRule_To_v1alpha2_DWDirectiveRule(src, dst, nil); err != nil {
+		return err
+	}
+
+	// Preserve Hub data on down-conversion except for metadata.
+	return utilconversion.MarshalData(src, dst)
+}
+
+func (src *DirectiveBreakdown) ConvertTo(dstRaw conversion.Hub) error {
+	convertlog.Info("Convert DirectiveBreakdown To Hub", "name", src.GetName(), "namespace", src.GetNamespace())
+	dst := dstRaw.(*dwsv1alpha3.DirectiveBreakdown)
+
+	if err := Convert_v1alpha2_DirectiveBreakdown_To_v1alpha3_DirectiveBreakdown(src, dst, nil); err != nil {
+		return err
+	}
+
+	// Manually restore data.
+	restored := &dwsv1alpha3.DirectiveBreakdown{}
+	if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok {
+		return err
+	}
+	// EDIT THIS FUNCTION! If the annotation is holding anything that is
+	// hub-specific then copy it into 'dst' from 'restored'.
+	// Otherwise, you may comment out UnmarshalData() until it's needed.
+
+	return nil
+}
+
+func (dst *DirectiveBreakdown) ConvertFrom(srcRaw conversion.Hub) error {
+	src := srcRaw.(*dwsv1alpha3.DirectiveBreakdown)
+	convertlog.Info("Convert DirectiveBreakdown From Hub", "name", src.GetName(), "namespace", src.GetNamespace())
+
+	if err := Convert_v1alpha3_DirectiveBreakdown_To_v1alpha2_DirectiveBreakdown(src, dst, nil); err != nil {
+		return err
+	}
+
+	// Preserve Hub data on down-conversion except for metadata.
+	return utilconversion.MarshalData(src, dst)
+}
+
+func (src *PersistentStorageInstance) ConvertTo(dstRaw conversion.Hub) error {
+	convertlog.Info("Convert PersistentStorageInstance To Hub", "name", src.GetName(), "namespace", src.GetNamespace())
+	dst := dstRaw.(*dwsv1alpha3.PersistentStorageInstance)
+
+	if err := Convert_v1alpha2_PersistentStorageInstance_To_v1alpha3_PersistentStorageInstance(src, dst, nil); err != nil {
+		return err
+	}
+
+	// Manually restore data.
+	restored := &dwsv1alpha3.PersistentStorageInstance{}
+	hasAnno, err := utilconversion.UnmarshalData(src, restored)
+	if err != nil {
+		return err
+	}
+	// EDIT THIS FUNCTION! If the annotation is holding anything that is
+	// hub-specific then copy it into 'dst' from 'restored'.
+	// Otherwise, you may comment out UnmarshalData() until it's needed.
+	if hasAnno {
+		dst.Spec.State = restored.Spec.State
+		dst.Status.State = restored.Status.State
+		dst.Status.Ready = restored.Status.Ready
+	}
+
+	return nil
+}
+
+func (dst *PersistentStorageInstance) ConvertFrom(srcRaw conversion.Hub) error {
+	src := srcRaw.(*dwsv1alpha3.PersistentStorageInstance)
+	convertlog.Info("Convert PersistentStorageInstance From Hub", "name", src.GetName(), "namespace", src.GetNamespace())
+
+	if err := Convert_v1alpha3_PersistentStorageInstance_To_v1alpha2_PersistentStorageInstance(src, dst, nil); err != nil {
+		return err
+	}
+
+	if src.Spec.State == dwsv1alpha3.PSIStateEnabled {
+		dst.Spec.State = PSIStateActive
+		if src.Status.State == dwsv1alpha3.PSIStateEnabled && src.Status.Ready == true {
+			dst.Status.State = PSIStateActive
+		} else {
+			dst.Status.State = PSIStateCreating
+		}
+	} else if src.Spec.State == dwsv1alpha3.PSIStateDisabled {
+		dst.Spec.State = PSIStateDestroying
+		if src.Status.State == dwsv1alpha3.PSIStateDisabled && src.Status.Ready == true {
+			dst.Status.State = PSIStateDestroying
+		} else {
+			dst.Status.State = PSIStateActive
+		}
+	}
+
+	// Preserve Hub data on down-conversion except for metadata.
+	return utilconversion.MarshalData(src, dst)
+}
+
+func (src *Servers) ConvertTo(dstRaw conversion.Hub) error {
+	convertlog.Info("Convert Servers To Hub", "name", src.GetName(), "namespace", src.GetNamespace())
+	dst := dstRaw.(*dwsv1alpha3.Servers)
+
+	if err := Convert_v1alpha2_Servers_To_v1alpha3_Servers(src, dst, nil); err != nil {
+		return err
+	}
+
+	// Manually restore data.
+	restored := &dwsv1alpha3.Servers{}
+	if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok {
+		return err
+	}
+	// EDIT THIS FUNCTION! If the annotation is holding anything that is
+	// hub-specific then copy it into 'dst' from 'restored'.
+	// Otherwise, you may comment out UnmarshalData() until it's needed.
+
+	return nil
+}
+
+func (dst *Servers) ConvertFrom(srcRaw conversion.Hub) error {
+	src := srcRaw.(*dwsv1alpha3.Servers)
+	convertlog.Info("Convert Servers From Hub", "name", src.GetName(), "namespace", src.GetNamespace())
+
+	if err := Convert_v1alpha3_Servers_To_v1alpha2_Servers(src, dst, nil); err != nil {
+		return err
+	}
+
+	// Preserve Hub data on down-conversion except for metadata.
+	return utilconversion.MarshalData(src, dst)
+}
+
+func (src *Storage) ConvertTo(dstRaw conversion.Hub) error {
+	convertlog.Info("Convert Storage To Hub", "name", src.GetName(), "namespace", src.GetNamespace())
+	dst := dstRaw.(*dwsv1alpha3.Storage)
+
+	if err := Convert_v1alpha2_Storage_To_v1alpha3_Storage(src, dst, nil); err != nil {
+		return err
+	}
+
+	// Manually restore data.
+	restored := &dwsv1alpha3.Storage{}
+	if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok {
+		return err
+	}
+	// EDIT THIS FUNCTION! If the annotation is holding anything that is
+	// hub-specific then copy it into 'dst' from 'restored'.
+	// Otherwise, you may comment out UnmarshalData() until it's needed.
+
+	return nil
+}
+
+func (dst *Storage) ConvertFrom(srcRaw conversion.Hub) error {
+	src := srcRaw.(*dwsv1alpha3.Storage)
+	convertlog.Info("Convert Storage From Hub", "name", src.GetName(), "namespace", src.GetNamespace())
+
+	if err := Convert_v1alpha3_Storage_To_v1alpha2_Storage(src, dst, nil); err != nil {
+		return err
+	}
+
+	// Preserve Hub data on down-conversion except for metadata.
+	return utilconversion.MarshalData(src, dst)
+}
+
+func (src *SystemConfiguration) ConvertTo(dstRaw conversion.Hub) error {
+	convertlog.Info("Convert SystemConfiguration To Hub", "name", src.GetName(), "namespace", src.GetNamespace())
+	dst := dstRaw.(*dwsv1alpha3.SystemConfiguration)
+
+	if err := Convert_v1alpha2_SystemConfiguration_To_v1alpha3_SystemConfiguration(src, dst, nil); err != nil {
+		return err
+	}
+
+	// Manually restore data.
+	restored := &dwsv1alpha3.SystemConfiguration{}
+	if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok {
+		return err
+	}
+	// EDIT THIS FUNCTION! If the annotation is holding anything that is
+	// hub-specific then copy it into 'dst' from 'restored'.
+	// Otherwise, you may comment out UnmarshalData() until it's needed.
+
+	return nil
+}
+
+func (dst *SystemConfiguration) ConvertFrom(srcRaw conversion.Hub) error {
+	src := srcRaw.(*dwsv1alpha3.SystemConfiguration)
+	convertlog.Info("Convert SystemConfiguration From Hub", "name", src.GetName(), "namespace", src.GetNamespace())
+
+	if err := Convert_v1alpha3_SystemConfiguration_To_v1alpha2_SystemConfiguration(src, dst, nil); err != nil {
+		return err
+	}
+
+	// Preserve Hub data on down-conversion except for metadata.
+	return utilconversion.MarshalData(src, dst)
+}
+
+func (src *Workflow) ConvertTo(dstRaw conversion.Hub) error {
+	convertlog.Info("Convert Workflow To Hub", "name", src.GetName(), "namespace", src.GetNamespace())
+	dst := dstRaw.(*dwsv1alpha3.Workflow)
+
+	if err := Convert_v1alpha2_Workflow_To_v1alpha3_Workflow(src, dst, nil); err != nil {
+		return err
+	}
+
+	// Manually restore data.
+	restored := &dwsv1alpha3.Workflow{}
+	if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok {
+		return err
+	}
+	// EDIT THIS FUNCTION! If the annotation is holding anything that is
+	// hub-specific then copy it into 'dst' from 'restored'.
+	// Otherwise, you may comment out UnmarshalData() until it's needed.
+
+	return nil
+}
+
+func (dst *Workflow) ConvertFrom(srcRaw conversion.Hub) error {
+	src := srcRaw.(*dwsv1alpha3.Workflow)
+	convertlog.Info("Convert Workflow From Hub", "name", src.GetName(), "namespace", src.GetNamespace())
+
+	if err := Convert_v1alpha3_Workflow_To_v1alpha2_Workflow(src, dst, nil); err != nil {
+		return err
+	}
+
+	// Preserve Hub data on down-conversion except for metadata.
+	return utilconversion.MarshalData(src, dst)
+}
+
+// The List-based ConvertTo/ConvertFrom routines are never used by the
+// conversion webhook, but the conversion-verifier tool wants to see them.
+// The conversion-gen tool generated the Convert_X_to_Y routines, should they
+// ever be needed.
+
+func resource(resource string) schema.GroupResource {
+	return schema.GroupResource{Group: "dws", Resource: resource}
+}
+
+func (src *ClientMountList) ConvertTo(dstRaw conversion.Hub) error {
+	return apierrors.NewMethodNotSupported(resource("ClientMountList"), "ConvertTo")
+}
+
+func (dst *ClientMountList) ConvertFrom(srcRaw conversion.Hub) error {
+	return apierrors.NewMethodNotSupported(resource("ClientMountList"), "ConvertFrom")
+}
+
+func (src *ComputesList) ConvertTo(dstRaw conversion.Hub) error {
+	return apierrors.NewMethodNotSupported(resource("ComputesList"), "ConvertTo")
+}
+
+func (dst *ComputesList) ConvertFrom(srcRaw conversion.Hub) error {
+	return apierrors.NewMethodNotSupported(resource("ComputesList"), "ConvertFrom")
+}
+
+func (src *DWDirectiveRuleList) ConvertTo(dstRaw conversion.Hub) error {
+	return apierrors.NewMethodNotSupported(resource("DWDirectiveRuleList"), "ConvertTo")
+}
+
+func (dst *DWDirectiveRuleList) ConvertFrom(srcRaw conversion.Hub) error {
+	return apierrors.NewMethodNotSupported(resource("DWDirectiveRuleList"), "ConvertFrom")
+}
+
+func (src *DirectiveBreakdownList) ConvertTo(dstRaw conversion.Hub) error {
+	return apierrors.NewMethodNotSupported(resource("DirectiveBreakdownList"), "ConvertTo")
+}
+
+func (dst *DirectiveBreakdownList) ConvertFrom(srcRaw conversion.Hub) error {
+	return apierrors.NewMethodNotSupported(resource("DirectiveBreakdownList"), "ConvertFrom")
+}
+
+func (src *PersistentStorageInstanceList) ConvertTo(dstRaw conversion.Hub) error {
+	return apierrors.NewMethodNotSupported(resource("PersistentStorageInstanceList"), "ConvertTo")
+}
+
+func (dst *PersistentStorageInstanceList) ConvertFrom(srcRaw conversion.Hub) error {
+	return apierrors.NewMethodNotSupported(resource("PersistentStorageInstanceList"), "ConvertFrom")
+}
+
+func (src *ServersList) ConvertTo(dstRaw conversion.Hub) error {
+	return apierrors.NewMethodNotSupported(resource("ServersList"), "ConvertTo")
+}
+
+func (dst *ServersList) ConvertFrom(srcRaw conversion.Hub) error {
+	return apierrors.NewMethodNotSupported(resource("ServersList"), "ConvertFrom")
+}
+
+func (src *StorageList) ConvertTo(dstRaw conversion.Hub) error {
+	return apierrors.NewMethodNotSupported(resource("StorageList"), "ConvertTo")
+}
+
+func (dst *StorageList) ConvertFrom(srcRaw conversion.Hub) error {
+	return apierrors.NewMethodNotSupported(resource("StorageList"), "ConvertFrom")
+}
+
+func (src *SystemConfigurationList) ConvertTo(dstRaw conversion.Hub) error {
+	return apierrors.NewMethodNotSupported(resource("SystemConfigurationList"), "ConvertTo")
+}
+
+func (dst *SystemConfigurationList) ConvertFrom(srcRaw conversion.Hub) error {
+	return apierrors.NewMethodNotSupported(resource("SystemConfigurationList"), "ConvertFrom")
+}
+
+func (src *WorkflowList) ConvertTo(dstRaw conversion.Hub) error {
+	return apierrors.NewMethodNotSupported(resource("WorkflowList"), "ConvertTo")
+}
+
+func (dst *WorkflowList) ConvertFrom(srcRaw conversion.Hub) error {
+	return apierrors.NewMethodNotSupported(resource("WorkflowList"), "ConvertFrom")
+}
+
+func Convert_v1alpha3_PersistentStorageInstanceStatus_To_v1alpha2_PersistentStorageInstanceStatus(in *dwsv1alpha3.PersistentStorageInstanceStatus, out *PersistentStorageInstanceStatus, s apiconversion.Scope) error {
+	return autoConvert_v1alpha3_PersistentStorageInstanceStatus_To_v1alpha2_PersistentStorageInstanceStatus(in, out, s)
+}
diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/directivebreakdown_types.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/directivebreakdown_types.go
index dfbc5d128..d73c6bc21 100644
--- a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/directivebreakdown_types.go
+++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/directivebreakdown_types.go
@@ -190,7 +190,6 @@ type DirectiveBreakdownStatus struct {
 }
 
 //+kubebuilder:object:root=true
-//+kubebuilder:storageversion
 //+kubebuilder:subresource:status
//+kubebuilder:printcolumn:name="READY",type="boolean",JSONPath=".status.ready",description="True if allocation sets have been generated" //+kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/doc.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/doc.go new file mode 100644 index 000000000..5f6d4026f --- /dev/null +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/doc.go @@ -0,0 +1,23 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// The following tag tells conversion-gen to generate conversion routines, and +// it tells conversion-gen the name of the hub version. 
+// +k8s:conversion-gen=github.com/DataWorkflowServices/dws/api/v1alpha3 +package v1alpha2 diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/dwdirectiverule_types.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/dwdirectiverule_types.go index e5b352f66..a096822fb 100644 --- a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/dwdirectiverule_types.go +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/dwdirectiverule_types.go @@ -1,5 +1,5 @@ /* - * Copyright 2021-2023 Hewlett Packard Enterprise Development LP + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -27,7 +27,6 @@ import ( // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. //+kubebuilder:object:root=true -//+kubebuilder:storageversion // DWDirectiveRule is the Schema for the DWDirective API type DWDirectiveRule struct { diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/groupversion_info.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/groupversion_info.go index 22374c4f4..5d53187b3 100644 --- a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/groupversion_info.go +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/groupversion_info.go @@ -1,5 +1,5 @@ /* - * Copyright 2023 Hewlett Packard Enterprise Development LP + * Copyright 2023-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -36,4 +36,7 @@ var ( // AddToScheme adds the types in this group-version to the given scheme. AddToScheme = SchemeBuilder.AddToScheme + + // Used by zz_generated.conversion.go. 
+ localSchemeBuilder = SchemeBuilder.SchemeBuilder ) diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/persistentstorageinstance_types.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/persistentstorageinstance_types.go index 346a60f80..f79de0409 100644 --- a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/persistentstorageinstance_types.go +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/persistentstorageinstance_types.go @@ -1,5 +1,5 @@ /* - * Copyright 2021-2023 Hewlett Packard Enterprise Development LP + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -89,7 +89,6 @@ type PersistentStorageInstanceStatus struct { } //+kubebuilder:object:root=true -//+kubebuilder:storageversion //+kubebuilder:subresource:status //+kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" //+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/servers_types.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/servers_types.go index 5870a731e..327dcbf48 100644 --- a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/servers_types.go +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/servers_types.go @@ -1,5 +1,5 @@ /* - * Copyright 2021-2023 Hewlett Packard Enterprise Development LP + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. 
* * The entirety of this work is licensed under the Apache License, @@ -87,7 +87,6 @@ type ServersStatus struct { } //+kubebuilder:object:root=true -//+kubebuilder:storageversion //+kubebuilder:subresource:status //+kubebuilder:printcolumn:name="READY",type="boolean",JSONPath=".status.ready",description="True if allocation sets have been generated" //+kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/storage_types.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/storage_types.go index 55cb448d9..ead68ef97 100644 --- a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/storage_types.go +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/storage_types.go @@ -137,7 +137,6 @@ type StorageStatus struct { } //+kubebuilder:object:root=true -//+kubebuilder:storageversion //+kubebuilder:subresource:status //+kubebuilder:printcolumn:name="State",type="string",JSONPath=".spec.state",description="State of the storage resource" //+kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.status",description="Status of the storage resource" diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/systemconfiguration_types.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/systemconfiguration_types.go index 2c65e3a00..c2aceb1fc 100644 --- a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/systemconfiguration_types.go +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/systemconfiguration_types.go @@ -88,7 +88,6 @@ type SystemConfigurationStatus struct { } //+kubebuilder:object:root=true -//+kubebuilder:storageversion //+kubebuilder:subresource:status //+kubebuilder:printcolumn:name="READY",type="boolean",JSONPath=".status.ready",description="True if SystemConfiguration is reconciled" //+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" diff --git 
a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/workflow_types.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/workflow_types.go index d7df059bc..1f3614397 100644 --- a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/workflow_types.go +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/workflow_types.go @@ -1,5 +1,5 @@ /* - * Copyright 2021-2023 Hewlett Packard Enterprise Development LP + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -250,7 +250,6 @@ type WorkflowStatus struct { } //+kubebuilder:object:root=true -//+kubebuilder:storageversion //+kubebuilder:printcolumn:name="STATE",type="string",JSONPath=".status.state",description="Current state" //+kubebuilder:printcolumn:name="READY",type="boolean",JSONPath=".status.ready",description="True if current state is achieved" //+kubebuilder:printcolumn:name="STATUS",type="string",JSONPath=".status.status",description="Indicates achievement of current state" diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/zz_generated.conversion.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/zz_generated.conversion.go new file mode 100644 index 000000000..8701bed49 --- /dev/null +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/zz_generated.conversion.go @@ -0,0 +1,2243 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
+ * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + unsafe "unsafe" + + v1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" + dwdparse "github.com/DataWorkflowServices/dws/utils/dwdparse" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*AllocationSetColocationConstraint)(nil), (*v1alpha3.AllocationSetColocationConstraint)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_AllocationSetColocationConstraint_To_v1alpha3_AllocationSetColocationConstraint(a.(*AllocationSetColocationConstraint), b.(*v1alpha3.AllocationSetColocationConstraint), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.AllocationSetColocationConstraint)(nil), (*AllocationSetColocationConstraint)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_AllocationSetColocationConstraint_To_v1alpha2_AllocationSetColocationConstraint(a.(*v1alpha3.AllocationSetColocationConstraint), b.(*AllocationSetColocationConstraint), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AllocationSetConstraints)(nil), (*v1alpha3.AllocationSetConstraints)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_AllocationSetConstraints_To_v1alpha3_AllocationSetConstraints(a.(*AllocationSetConstraints), b.(*v1alpha3.AllocationSetConstraints), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.AllocationSetConstraints)(nil), (*AllocationSetConstraints)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_AllocationSetConstraints_To_v1alpha2_AllocationSetConstraints(a.(*v1alpha3.AllocationSetConstraints), b.(*AllocationSetConstraints), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClientMount)(nil), (*v1alpha3.ClientMount)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ClientMount_To_v1alpha3_ClientMount(a.(*ClientMount), b.(*v1alpha3.ClientMount), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*v1alpha3.ClientMount)(nil), (*ClientMount)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ClientMount_To_v1alpha2_ClientMount(a.(*v1alpha3.ClientMount), b.(*ClientMount), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClientMountDevice)(nil), (*v1alpha3.ClientMountDevice)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ClientMountDevice_To_v1alpha3_ClientMountDevice(a.(*ClientMountDevice), b.(*v1alpha3.ClientMountDevice), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.ClientMountDevice)(nil), (*ClientMountDevice)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ClientMountDevice_To_v1alpha2_ClientMountDevice(a.(*v1alpha3.ClientMountDevice), b.(*ClientMountDevice), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClientMountDeviceLVM)(nil), (*v1alpha3.ClientMountDeviceLVM)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ClientMountDeviceLVM_To_v1alpha3_ClientMountDeviceLVM(a.(*ClientMountDeviceLVM), b.(*v1alpha3.ClientMountDeviceLVM), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.ClientMountDeviceLVM)(nil), (*ClientMountDeviceLVM)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ClientMountDeviceLVM_To_v1alpha2_ClientMountDeviceLVM(a.(*v1alpha3.ClientMountDeviceLVM), b.(*ClientMountDeviceLVM), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClientMountDeviceLustre)(nil), (*v1alpha3.ClientMountDeviceLustre)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ClientMountDeviceLustre_To_v1alpha3_ClientMountDeviceLustre(a.(*ClientMountDeviceLustre), b.(*v1alpha3.ClientMountDeviceLustre), scope) + }); err != 
nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.ClientMountDeviceLustre)(nil), (*ClientMountDeviceLustre)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ClientMountDeviceLustre_To_v1alpha2_ClientMountDeviceLustre(a.(*v1alpha3.ClientMountDeviceLustre), b.(*ClientMountDeviceLustre), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClientMountDeviceReference)(nil), (*v1alpha3.ClientMountDeviceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ClientMountDeviceReference_To_v1alpha3_ClientMountDeviceReference(a.(*ClientMountDeviceReference), b.(*v1alpha3.ClientMountDeviceReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.ClientMountDeviceReference)(nil), (*ClientMountDeviceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ClientMountDeviceReference_To_v1alpha2_ClientMountDeviceReference(a.(*v1alpha3.ClientMountDeviceReference), b.(*ClientMountDeviceReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClientMountInfo)(nil), (*v1alpha3.ClientMountInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ClientMountInfo_To_v1alpha3_ClientMountInfo(a.(*ClientMountInfo), b.(*v1alpha3.ClientMountInfo), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.ClientMountInfo)(nil), (*ClientMountInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ClientMountInfo_To_v1alpha2_ClientMountInfo(a.(*v1alpha3.ClientMountInfo), b.(*ClientMountInfo), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClientMountInfoStatus)(nil), (*v1alpha3.ClientMountInfoStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha2_ClientMountInfoStatus_To_v1alpha3_ClientMountInfoStatus(a.(*ClientMountInfoStatus), b.(*v1alpha3.ClientMountInfoStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.ClientMountInfoStatus)(nil), (*ClientMountInfoStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ClientMountInfoStatus_To_v1alpha2_ClientMountInfoStatus(a.(*v1alpha3.ClientMountInfoStatus), b.(*ClientMountInfoStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClientMountList)(nil), (*v1alpha3.ClientMountList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ClientMountList_To_v1alpha3_ClientMountList(a.(*ClientMountList), b.(*v1alpha3.ClientMountList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.ClientMountList)(nil), (*ClientMountList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ClientMountList_To_v1alpha2_ClientMountList(a.(*v1alpha3.ClientMountList), b.(*ClientMountList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClientMountNVMeDesc)(nil), (*v1alpha3.ClientMountNVMeDesc)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ClientMountNVMeDesc_To_v1alpha3_ClientMountNVMeDesc(a.(*ClientMountNVMeDesc), b.(*v1alpha3.ClientMountNVMeDesc), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.ClientMountNVMeDesc)(nil), (*ClientMountNVMeDesc)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ClientMountNVMeDesc_To_v1alpha2_ClientMountNVMeDesc(a.(*v1alpha3.ClientMountNVMeDesc), b.(*ClientMountNVMeDesc), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClientMountSpec)(nil), (*v1alpha3.ClientMountSpec)(nil), func(a, b interface{}, scope 
conversion.Scope) error { + return Convert_v1alpha2_ClientMountSpec_To_v1alpha3_ClientMountSpec(a.(*ClientMountSpec), b.(*v1alpha3.ClientMountSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.ClientMountSpec)(nil), (*ClientMountSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ClientMountSpec_To_v1alpha2_ClientMountSpec(a.(*v1alpha3.ClientMountSpec), b.(*ClientMountSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClientMountStatus)(nil), (*v1alpha3.ClientMountStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ClientMountStatus_To_v1alpha3_ClientMountStatus(a.(*ClientMountStatus), b.(*v1alpha3.ClientMountStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.ClientMountStatus)(nil), (*ClientMountStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ClientMountStatus_To_v1alpha2_ClientMountStatus(a.(*v1alpha3.ClientMountStatus), b.(*ClientMountStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ComputeBreakdown)(nil), (*v1alpha3.ComputeBreakdown)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ComputeBreakdown_To_v1alpha3_ComputeBreakdown(a.(*ComputeBreakdown), b.(*v1alpha3.ComputeBreakdown), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.ComputeBreakdown)(nil), (*ComputeBreakdown)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ComputeBreakdown_To_v1alpha2_ComputeBreakdown(a.(*v1alpha3.ComputeBreakdown), b.(*ComputeBreakdown), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ComputeConstraints)(nil), (*v1alpha3.ComputeConstraints)(nil), func(a, b interface{}, scope conversion.Scope) error { + 
return Convert_v1alpha2_ComputeConstraints_To_v1alpha3_ComputeConstraints(a.(*ComputeConstraints), b.(*v1alpha3.ComputeConstraints), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.ComputeConstraints)(nil), (*ComputeConstraints)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ComputeConstraints_To_v1alpha2_ComputeConstraints(a.(*v1alpha3.ComputeConstraints), b.(*ComputeConstraints), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ComputeLocationAccess)(nil), (*v1alpha3.ComputeLocationAccess)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ComputeLocationAccess_To_v1alpha3_ComputeLocationAccess(a.(*ComputeLocationAccess), b.(*v1alpha3.ComputeLocationAccess), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.ComputeLocationAccess)(nil), (*ComputeLocationAccess)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ComputeLocationAccess_To_v1alpha2_ComputeLocationAccess(a.(*v1alpha3.ComputeLocationAccess), b.(*ComputeLocationAccess), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ComputeLocationConstraint)(nil), (*v1alpha3.ComputeLocationConstraint)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ComputeLocationConstraint_To_v1alpha3_ComputeLocationConstraint(a.(*ComputeLocationConstraint), b.(*v1alpha3.ComputeLocationConstraint), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.ComputeLocationConstraint)(nil), (*ComputeLocationConstraint)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ComputeLocationConstraint_To_v1alpha2_ComputeLocationConstraint(a.(*v1alpha3.ComputeLocationConstraint), b.(*ComputeLocationConstraint), scope) + }); err != nil { + return err + } + if err 
:= s.AddGeneratedConversionFunc((*Computes)(nil), (*v1alpha3.Computes)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_Computes_To_v1alpha3_Computes(a.(*Computes), b.(*v1alpha3.Computes), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.Computes)(nil), (*Computes)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_Computes_To_v1alpha2_Computes(a.(*v1alpha3.Computes), b.(*Computes), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ComputesData)(nil), (*v1alpha3.ComputesData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ComputesData_To_v1alpha3_ComputesData(a.(*ComputesData), b.(*v1alpha3.ComputesData), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.ComputesData)(nil), (*ComputesData)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ComputesData_To_v1alpha2_ComputesData(a.(*v1alpha3.ComputesData), b.(*ComputesData), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ComputesList)(nil), (*v1alpha3.ComputesList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ComputesList_To_v1alpha3_ComputesList(a.(*ComputesList), b.(*v1alpha3.ComputesList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.ComputesList)(nil), (*ComputesList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ComputesList_To_v1alpha2_ComputesList(a.(*v1alpha3.ComputesList), b.(*ComputesList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*DWDirectiveRule)(nil), (*v1alpha3.DWDirectiveRule)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha2_DWDirectiveRule_To_v1alpha3_DWDirectiveRule(a.(*DWDirectiveRule), b.(*v1alpha3.DWDirectiveRule), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.DWDirectiveRule)(nil), (*DWDirectiveRule)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_DWDirectiveRule_To_v1alpha2_DWDirectiveRule(a.(*v1alpha3.DWDirectiveRule), b.(*DWDirectiveRule), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*DWDirectiveRuleList)(nil), (*v1alpha3.DWDirectiveRuleList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_DWDirectiveRuleList_To_v1alpha3_DWDirectiveRuleList(a.(*DWDirectiveRuleList), b.(*v1alpha3.DWDirectiveRuleList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.DWDirectiveRuleList)(nil), (*DWDirectiveRuleList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_DWDirectiveRuleList_To_v1alpha2_DWDirectiveRuleList(a.(*v1alpha3.DWDirectiveRuleList), b.(*DWDirectiveRuleList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*DirectiveBreakdown)(nil), (*v1alpha3.DirectiveBreakdown)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_DirectiveBreakdown_To_v1alpha3_DirectiveBreakdown(a.(*DirectiveBreakdown), b.(*v1alpha3.DirectiveBreakdown), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.DirectiveBreakdown)(nil), (*DirectiveBreakdown)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_DirectiveBreakdown_To_v1alpha2_DirectiveBreakdown(a.(*v1alpha3.DirectiveBreakdown), b.(*DirectiveBreakdown), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*DirectiveBreakdownList)(nil), (*v1alpha3.DirectiveBreakdownList)(nil), func(a, b interface{}, scope 
conversion.Scope) error { + return Convert_v1alpha2_DirectiveBreakdownList_To_v1alpha3_DirectiveBreakdownList(a.(*DirectiveBreakdownList), b.(*v1alpha3.DirectiveBreakdownList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.DirectiveBreakdownList)(nil), (*DirectiveBreakdownList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_DirectiveBreakdownList_To_v1alpha2_DirectiveBreakdownList(a.(*v1alpha3.DirectiveBreakdownList), b.(*DirectiveBreakdownList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*DirectiveBreakdownSpec)(nil), (*v1alpha3.DirectiveBreakdownSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_DirectiveBreakdownSpec_To_v1alpha3_DirectiveBreakdownSpec(a.(*DirectiveBreakdownSpec), b.(*v1alpha3.DirectiveBreakdownSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.DirectiveBreakdownSpec)(nil), (*DirectiveBreakdownSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_DirectiveBreakdownSpec_To_v1alpha2_DirectiveBreakdownSpec(a.(*v1alpha3.DirectiveBreakdownSpec), b.(*DirectiveBreakdownSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*DirectiveBreakdownStatus)(nil), (*v1alpha3.DirectiveBreakdownStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_DirectiveBreakdownStatus_To_v1alpha3_DirectiveBreakdownStatus(a.(*DirectiveBreakdownStatus), b.(*v1alpha3.DirectiveBreakdownStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.DirectiveBreakdownStatus)(nil), (*DirectiveBreakdownStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_DirectiveBreakdownStatus_To_v1alpha2_DirectiveBreakdownStatus(a.(*v1alpha3.DirectiveBreakdownStatus), 
b.(*DirectiveBreakdownStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Node)(nil), (*v1alpha3.Node)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_Node_To_v1alpha3_Node(a.(*Node), b.(*v1alpha3.Node), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.Node)(nil), (*Node)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_Node_To_v1alpha2_Node(a.(*v1alpha3.Node), b.(*Node), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*PersistentStorageInstance)(nil), (*v1alpha3.PersistentStorageInstance)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_PersistentStorageInstance_To_v1alpha3_PersistentStorageInstance(a.(*PersistentStorageInstance), b.(*v1alpha3.PersistentStorageInstance), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.PersistentStorageInstance)(nil), (*PersistentStorageInstance)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_PersistentStorageInstance_To_v1alpha2_PersistentStorageInstance(a.(*v1alpha3.PersistentStorageInstance), b.(*PersistentStorageInstance), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*PersistentStorageInstanceList)(nil), (*v1alpha3.PersistentStorageInstanceList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_PersistentStorageInstanceList_To_v1alpha3_PersistentStorageInstanceList(a.(*PersistentStorageInstanceList), b.(*v1alpha3.PersistentStorageInstanceList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.PersistentStorageInstanceList)(nil), (*PersistentStorageInstanceList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha3_PersistentStorageInstanceList_To_v1alpha2_PersistentStorageInstanceList(a.(*v1alpha3.PersistentStorageInstanceList), b.(*PersistentStorageInstanceList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*PersistentStorageInstanceSpec)(nil), (*v1alpha3.PersistentStorageInstanceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_PersistentStorageInstanceSpec_To_v1alpha3_PersistentStorageInstanceSpec(a.(*PersistentStorageInstanceSpec), b.(*v1alpha3.PersistentStorageInstanceSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.PersistentStorageInstanceSpec)(nil), (*PersistentStorageInstanceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_PersistentStorageInstanceSpec_To_v1alpha2_PersistentStorageInstanceSpec(a.(*v1alpha3.PersistentStorageInstanceSpec), b.(*PersistentStorageInstanceSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*PersistentStorageInstanceStatus)(nil), (*v1alpha3.PersistentStorageInstanceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_PersistentStorageInstanceStatus_To_v1alpha3_PersistentStorageInstanceStatus(a.(*PersistentStorageInstanceStatus), b.(*v1alpha3.PersistentStorageInstanceStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ResourceError)(nil), (*v1alpha3.ResourceError)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ResourceError_To_v1alpha3_ResourceError(a.(*ResourceError), b.(*v1alpha3.ResourceError), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.ResourceError)(nil), (*ResourceError)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ResourceError_To_v1alpha2_ResourceError(a.(*v1alpha3.ResourceError), 
b.(*ResourceError), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ResourceErrorInfo)(nil), (*v1alpha3.ResourceErrorInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ResourceErrorInfo_To_v1alpha3_ResourceErrorInfo(a.(*ResourceErrorInfo), b.(*v1alpha3.ResourceErrorInfo), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.ResourceErrorInfo)(nil), (*ResourceErrorInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ResourceErrorInfo_To_v1alpha2_ResourceErrorInfo(a.(*v1alpha3.ResourceErrorInfo), b.(*ResourceErrorInfo), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Servers)(nil), (*v1alpha3.Servers)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_Servers_To_v1alpha3_Servers(a.(*Servers), b.(*v1alpha3.Servers), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.Servers)(nil), (*Servers)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_Servers_To_v1alpha2_Servers(a.(*v1alpha3.Servers), b.(*Servers), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ServersList)(nil), (*v1alpha3.ServersList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ServersList_To_v1alpha3_ServersList(a.(*ServersList), b.(*v1alpha3.ServersList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.ServersList)(nil), (*ServersList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ServersList_To_v1alpha2_ServersList(a.(*v1alpha3.ServersList), b.(*ServersList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ServersSpec)(nil), (*v1alpha3.ServersSpec)(nil), func(a, b interface{}, scope 
conversion.Scope) error { + return Convert_v1alpha2_ServersSpec_To_v1alpha3_ServersSpec(a.(*ServersSpec), b.(*v1alpha3.ServersSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.ServersSpec)(nil), (*ServersSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ServersSpec_To_v1alpha2_ServersSpec(a.(*v1alpha3.ServersSpec), b.(*ServersSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ServersSpecAllocationSet)(nil), (*v1alpha3.ServersSpecAllocationSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ServersSpecAllocationSet_To_v1alpha3_ServersSpecAllocationSet(a.(*ServersSpecAllocationSet), b.(*v1alpha3.ServersSpecAllocationSet), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.ServersSpecAllocationSet)(nil), (*ServersSpecAllocationSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ServersSpecAllocationSet_To_v1alpha2_ServersSpecAllocationSet(a.(*v1alpha3.ServersSpecAllocationSet), b.(*ServersSpecAllocationSet), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ServersSpecStorage)(nil), (*v1alpha3.ServersSpecStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ServersSpecStorage_To_v1alpha3_ServersSpecStorage(a.(*ServersSpecStorage), b.(*v1alpha3.ServersSpecStorage), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.ServersSpecStorage)(nil), (*ServersSpecStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ServersSpecStorage_To_v1alpha2_ServersSpecStorage(a.(*v1alpha3.ServersSpecStorage), b.(*ServersSpecStorage), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ServersStatus)(nil), (*v1alpha3.ServersStatus)(nil), 
func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ServersStatus_To_v1alpha3_ServersStatus(a.(*ServersStatus), b.(*v1alpha3.ServersStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.ServersStatus)(nil), (*ServersStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ServersStatus_To_v1alpha2_ServersStatus(a.(*v1alpha3.ServersStatus), b.(*ServersStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ServersStatusAllocationSet)(nil), (*v1alpha3.ServersStatusAllocationSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ServersStatusAllocationSet_To_v1alpha3_ServersStatusAllocationSet(a.(*ServersStatusAllocationSet), b.(*v1alpha3.ServersStatusAllocationSet), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.ServersStatusAllocationSet)(nil), (*ServersStatusAllocationSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ServersStatusAllocationSet_To_v1alpha2_ServersStatusAllocationSet(a.(*v1alpha3.ServersStatusAllocationSet), b.(*ServersStatusAllocationSet), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ServersStatusStorage)(nil), (*v1alpha3.ServersStatusStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_ServersStatusStorage_To_v1alpha3_ServersStatusStorage(a.(*ServersStatusStorage), b.(*v1alpha3.ServersStatusStorage), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.ServersStatusStorage)(nil), (*ServersStatusStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ServersStatusStorage_To_v1alpha2_ServersStatusStorage(a.(*v1alpha3.ServersStatusStorage), b.(*ServersStatusStorage), scope) + }); err != nil { + return err + } + 
if err := s.AddGeneratedConversionFunc((*Storage)(nil), (*v1alpha3.Storage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_Storage_To_v1alpha3_Storage(a.(*Storage), b.(*v1alpha3.Storage), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.Storage)(nil), (*Storage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_Storage_To_v1alpha2_Storage(a.(*v1alpha3.Storage), b.(*Storage), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*StorageAccess)(nil), (*v1alpha3.StorageAccess)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_StorageAccess_To_v1alpha3_StorageAccess(a.(*StorageAccess), b.(*v1alpha3.StorageAccess), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.StorageAccess)(nil), (*StorageAccess)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_StorageAccess_To_v1alpha2_StorageAccess(a.(*v1alpha3.StorageAccess), b.(*StorageAccess), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*StorageAllocationSet)(nil), (*v1alpha3.StorageAllocationSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_StorageAllocationSet_To_v1alpha3_StorageAllocationSet(a.(*StorageAllocationSet), b.(*v1alpha3.StorageAllocationSet), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.StorageAllocationSet)(nil), (*StorageAllocationSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_StorageAllocationSet_To_v1alpha2_StorageAllocationSet(a.(*v1alpha3.StorageAllocationSet), b.(*StorageAllocationSet), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*StorageBreakdown)(nil), (*v1alpha3.StorageBreakdown)(nil), func(a, b interface{}, scope 
conversion.Scope) error { + return Convert_v1alpha2_StorageBreakdown_To_v1alpha3_StorageBreakdown(a.(*StorageBreakdown), b.(*v1alpha3.StorageBreakdown), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.StorageBreakdown)(nil), (*StorageBreakdown)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_StorageBreakdown_To_v1alpha2_StorageBreakdown(a.(*v1alpha3.StorageBreakdown), b.(*StorageBreakdown), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*StorageDevice)(nil), (*v1alpha3.StorageDevice)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_StorageDevice_To_v1alpha3_StorageDevice(a.(*StorageDevice), b.(*v1alpha3.StorageDevice), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.StorageDevice)(nil), (*StorageDevice)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_StorageDevice_To_v1alpha2_StorageDevice(a.(*v1alpha3.StorageDevice), b.(*StorageDevice), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*StorageList)(nil), (*v1alpha3.StorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_StorageList_To_v1alpha3_StorageList(a.(*StorageList), b.(*v1alpha3.StorageList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.StorageList)(nil), (*StorageList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_StorageList_To_v1alpha2_StorageList(a.(*v1alpha3.StorageList), b.(*StorageList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*StorageSpec)(nil), (*v1alpha3.StorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_StorageSpec_To_v1alpha3_StorageSpec(a.(*StorageSpec), b.(*v1alpha3.StorageSpec), scope) + 
}); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.StorageSpec)(nil), (*StorageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_StorageSpec_To_v1alpha2_StorageSpec(a.(*v1alpha3.StorageSpec), b.(*StorageSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*StorageStatus)(nil), (*v1alpha3.StorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_StorageStatus_To_v1alpha3_StorageStatus(a.(*StorageStatus), b.(*v1alpha3.StorageStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.StorageStatus)(nil), (*StorageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_StorageStatus_To_v1alpha2_StorageStatus(a.(*v1alpha3.StorageStatus), b.(*StorageStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SystemConfiguration)(nil), (*v1alpha3.SystemConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_SystemConfiguration_To_v1alpha3_SystemConfiguration(a.(*SystemConfiguration), b.(*v1alpha3.SystemConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.SystemConfiguration)(nil), (*SystemConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_SystemConfiguration_To_v1alpha2_SystemConfiguration(a.(*v1alpha3.SystemConfiguration), b.(*SystemConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SystemConfigurationComputeNodeReference)(nil), (*v1alpha3.SystemConfigurationComputeNodeReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha2_SystemConfigurationComputeNodeReference_To_v1alpha3_SystemConfigurationComputeNodeReference(a.(*SystemConfigurationComputeNodeReference), b.(*v1alpha3.SystemConfigurationComputeNodeReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.SystemConfigurationComputeNodeReference)(nil), (*SystemConfigurationComputeNodeReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_SystemConfigurationComputeNodeReference_To_v1alpha2_SystemConfigurationComputeNodeReference(a.(*v1alpha3.SystemConfigurationComputeNodeReference), b.(*SystemConfigurationComputeNodeReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SystemConfigurationExternalComputeNode)(nil), (*v1alpha3.SystemConfigurationExternalComputeNode)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_SystemConfigurationExternalComputeNode_To_v1alpha3_SystemConfigurationExternalComputeNode(a.(*SystemConfigurationExternalComputeNode), b.(*v1alpha3.SystemConfigurationExternalComputeNode), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.SystemConfigurationExternalComputeNode)(nil), (*SystemConfigurationExternalComputeNode)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_SystemConfigurationExternalComputeNode_To_v1alpha2_SystemConfigurationExternalComputeNode(a.(*v1alpha3.SystemConfigurationExternalComputeNode), b.(*SystemConfigurationExternalComputeNode), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SystemConfigurationList)(nil), (*v1alpha3.SystemConfigurationList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_SystemConfigurationList_To_v1alpha3_SystemConfigurationList(a.(*SystemConfigurationList), b.(*v1alpha3.SystemConfigurationList), scope) + }); err != nil { + return 
err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.SystemConfigurationList)(nil), (*SystemConfigurationList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_SystemConfigurationList_To_v1alpha2_SystemConfigurationList(a.(*v1alpha3.SystemConfigurationList), b.(*SystemConfigurationList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SystemConfigurationSpec)(nil), (*v1alpha3.SystemConfigurationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_SystemConfigurationSpec_To_v1alpha3_SystemConfigurationSpec(a.(*SystemConfigurationSpec), b.(*v1alpha3.SystemConfigurationSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.SystemConfigurationSpec)(nil), (*SystemConfigurationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_SystemConfigurationSpec_To_v1alpha2_SystemConfigurationSpec(a.(*v1alpha3.SystemConfigurationSpec), b.(*SystemConfigurationSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SystemConfigurationStatus)(nil), (*v1alpha3.SystemConfigurationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_SystemConfigurationStatus_To_v1alpha3_SystemConfigurationStatus(a.(*SystemConfigurationStatus), b.(*v1alpha3.SystemConfigurationStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.SystemConfigurationStatus)(nil), (*SystemConfigurationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_SystemConfigurationStatus_To_v1alpha2_SystemConfigurationStatus(a.(*v1alpha3.SystemConfigurationStatus), b.(*SystemConfigurationStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SystemConfigurationStorageNode)(nil), 
(*v1alpha3.SystemConfigurationStorageNode)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_SystemConfigurationStorageNode_To_v1alpha3_SystemConfigurationStorageNode(a.(*SystemConfigurationStorageNode), b.(*v1alpha3.SystemConfigurationStorageNode), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.SystemConfigurationStorageNode)(nil), (*SystemConfigurationStorageNode)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_SystemConfigurationStorageNode_To_v1alpha2_SystemConfigurationStorageNode(a.(*v1alpha3.SystemConfigurationStorageNode), b.(*SystemConfigurationStorageNode), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Workflow)(nil), (*v1alpha3.Workflow)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_Workflow_To_v1alpha3_Workflow(a.(*Workflow), b.(*v1alpha3.Workflow), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.Workflow)(nil), (*Workflow)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_Workflow_To_v1alpha2_Workflow(a.(*v1alpha3.Workflow), b.(*Workflow), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*WorkflowDriverStatus)(nil), (*v1alpha3.WorkflowDriverStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_WorkflowDriverStatus_To_v1alpha3_WorkflowDriverStatus(a.(*WorkflowDriverStatus), b.(*v1alpha3.WorkflowDriverStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.WorkflowDriverStatus)(nil), (*WorkflowDriverStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_WorkflowDriverStatus_To_v1alpha2_WorkflowDriverStatus(a.(*v1alpha3.WorkflowDriverStatus), b.(*WorkflowDriverStatus), scope) + }); err != nil { + return err + 
} + if err := s.AddGeneratedConversionFunc((*WorkflowList)(nil), (*v1alpha3.WorkflowList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_WorkflowList_To_v1alpha3_WorkflowList(a.(*WorkflowList), b.(*v1alpha3.WorkflowList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.WorkflowList)(nil), (*WorkflowList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_WorkflowList_To_v1alpha2_WorkflowList(a.(*v1alpha3.WorkflowList), b.(*WorkflowList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*WorkflowSpec)(nil), (*v1alpha3.WorkflowSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_WorkflowSpec_To_v1alpha3_WorkflowSpec(a.(*WorkflowSpec), b.(*v1alpha3.WorkflowSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.WorkflowSpec)(nil), (*WorkflowSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_WorkflowSpec_To_v1alpha2_WorkflowSpec(a.(*v1alpha3.WorkflowSpec), b.(*WorkflowSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*WorkflowStatus)(nil), (*v1alpha3.WorkflowStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha2_WorkflowStatus_To_v1alpha3_WorkflowStatus(a.(*WorkflowStatus), b.(*v1alpha3.WorkflowStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha3.WorkflowStatus)(nil), (*WorkflowStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_WorkflowStatus_To_v1alpha2_WorkflowStatus(a.(*v1alpha3.WorkflowStatus), b.(*WorkflowStatus), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1alpha3.PersistentStorageInstanceStatus)(nil), (*PersistentStorageInstanceStatus)(nil), func(a, b interface{}, scope 
conversion.Scope) error {
		return Convert_v1alpha3_PersistentStorageInstanceStatus_To_v1alpha2_PersistentStorageInstanceStatus(a.(*v1alpha3.PersistentStorageInstanceStatus), b.(*PersistentStorageInstanceStatus), scope)
	}); err != nil {
		return err
	}
	return nil
}

// autoConvert_v1alpha2_AllocationSetColocationConstraint_To_v1alpha3_AllocationSetColocationConstraint copies the
// Type and Key fields from the v1alpha2 constraint into its v1alpha3 counterpart. The scope s is not used here.
func autoConvert_v1alpha2_AllocationSetColocationConstraint_To_v1alpha3_AllocationSetColocationConstraint(in *AllocationSetColocationConstraint, out *v1alpha3.AllocationSetColocationConstraint, s conversion.Scope) error {
	out.Type = in.Type
	out.Key = in.Key
	return nil
}

// Convert_v1alpha2_AllocationSetColocationConstraint_To_v1alpha3_AllocationSetColocationConstraint is an autogenerated conversion function.
func Convert_v1alpha2_AllocationSetColocationConstraint_To_v1alpha3_AllocationSetColocationConstraint(in *AllocationSetColocationConstraint, out *v1alpha3.AllocationSetColocationConstraint, s conversion.Scope) error {
	return autoConvert_v1alpha2_AllocationSetColocationConstraint_To_v1alpha3_AllocationSetColocationConstraint(in, out, s)
}

// autoConvert_v1alpha3_AllocationSetColocationConstraint_To_v1alpha2_AllocationSetColocationConstraint copies the
// Type and Key fields from the v1alpha3 constraint back into its v1alpha2 counterpart.
func autoConvert_v1alpha3_AllocationSetColocationConstraint_To_v1alpha2_AllocationSetColocationConstraint(in *v1alpha3.AllocationSetColocationConstraint, out *AllocationSetColocationConstraint, s conversion.Scope) error {
	out.Type = in.Type
	out.Key = in.Key
	return nil
}

// Convert_v1alpha3_AllocationSetColocationConstraint_To_v1alpha2_AllocationSetColocationConstraint is an autogenerated conversion function.
+func Convert_v1alpha3_AllocationSetColocationConstraint_To_v1alpha2_AllocationSetColocationConstraint(in *v1alpha3.AllocationSetColocationConstraint, out *AllocationSetColocationConstraint, s conversion.Scope) error { + return autoConvert_v1alpha3_AllocationSetColocationConstraint_To_v1alpha2_AllocationSetColocationConstraint(in, out, s) +} + +func autoConvert_v1alpha2_AllocationSetConstraints_To_v1alpha3_AllocationSetConstraints(in *AllocationSetConstraints, out *v1alpha3.AllocationSetConstraints, s conversion.Scope) error { + out.Labels = *(*[]string)(unsafe.Pointer(&in.Labels)) + out.Scale = in.Scale + out.Count = in.Count + out.Colocation = *(*[]v1alpha3.AllocationSetColocationConstraint)(unsafe.Pointer(&in.Colocation)) + return nil +} + +// Convert_v1alpha2_AllocationSetConstraints_To_v1alpha3_AllocationSetConstraints is an autogenerated conversion function. +func Convert_v1alpha2_AllocationSetConstraints_To_v1alpha3_AllocationSetConstraints(in *AllocationSetConstraints, out *v1alpha3.AllocationSetConstraints, s conversion.Scope) error { + return autoConvert_v1alpha2_AllocationSetConstraints_To_v1alpha3_AllocationSetConstraints(in, out, s) +} + +func autoConvert_v1alpha3_AllocationSetConstraints_To_v1alpha2_AllocationSetConstraints(in *v1alpha3.AllocationSetConstraints, out *AllocationSetConstraints, s conversion.Scope) error { + out.Labels = *(*[]string)(unsafe.Pointer(&in.Labels)) + out.Scale = in.Scale + out.Count = in.Count + out.Colocation = *(*[]AllocationSetColocationConstraint)(unsafe.Pointer(&in.Colocation)) + return nil +} + +// Convert_v1alpha3_AllocationSetConstraints_To_v1alpha2_AllocationSetConstraints is an autogenerated conversion function. 
+func Convert_v1alpha3_AllocationSetConstraints_To_v1alpha2_AllocationSetConstraints(in *v1alpha3.AllocationSetConstraints, out *AllocationSetConstraints, s conversion.Scope) error { + return autoConvert_v1alpha3_AllocationSetConstraints_To_v1alpha2_AllocationSetConstraints(in, out, s) +} + +func autoConvert_v1alpha2_ClientMount_To_v1alpha3_ClientMount(in *ClientMount, out *v1alpha3.ClientMount, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha2_ClientMountSpec_To_v1alpha3_ClientMountSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha2_ClientMountStatus_To_v1alpha3_ClientMountStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha2_ClientMount_To_v1alpha3_ClientMount is an autogenerated conversion function. +func Convert_v1alpha2_ClientMount_To_v1alpha3_ClientMount(in *ClientMount, out *v1alpha3.ClientMount, s conversion.Scope) error { + return autoConvert_v1alpha2_ClientMount_To_v1alpha3_ClientMount(in, out, s) +} + +func autoConvert_v1alpha3_ClientMount_To_v1alpha2_ClientMount(in *v1alpha3.ClientMount, out *ClientMount, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha3_ClientMountSpec_To_v1alpha2_ClientMountSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha3_ClientMountStatus_To_v1alpha2_ClientMountStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha3_ClientMount_To_v1alpha2_ClientMount is an autogenerated conversion function. 
+func Convert_v1alpha3_ClientMount_To_v1alpha2_ClientMount(in *v1alpha3.ClientMount, out *ClientMount, s conversion.Scope) error { + return autoConvert_v1alpha3_ClientMount_To_v1alpha2_ClientMount(in, out, s) +} + +func autoConvert_v1alpha2_ClientMountDevice_To_v1alpha3_ClientMountDevice(in *ClientMountDevice, out *v1alpha3.ClientMountDevice, s conversion.Scope) error { + out.Type = v1alpha3.ClientMountDeviceType(in.Type) + out.Lustre = (*v1alpha3.ClientMountDeviceLustre)(unsafe.Pointer(in.Lustre)) + out.LVM = (*v1alpha3.ClientMountDeviceLVM)(unsafe.Pointer(in.LVM)) + out.DeviceReference = (*v1alpha3.ClientMountDeviceReference)(unsafe.Pointer(in.DeviceReference)) + return nil +} + +// Convert_v1alpha2_ClientMountDevice_To_v1alpha3_ClientMountDevice is an autogenerated conversion function. +func Convert_v1alpha2_ClientMountDevice_To_v1alpha3_ClientMountDevice(in *ClientMountDevice, out *v1alpha3.ClientMountDevice, s conversion.Scope) error { + return autoConvert_v1alpha2_ClientMountDevice_To_v1alpha3_ClientMountDevice(in, out, s) +} + +func autoConvert_v1alpha3_ClientMountDevice_To_v1alpha2_ClientMountDevice(in *v1alpha3.ClientMountDevice, out *ClientMountDevice, s conversion.Scope) error { + out.Type = ClientMountDeviceType(in.Type) + out.Lustre = (*ClientMountDeviceLustre)(unsafe.Pointer(in.Lustre)) + out.LVM = (*ClientMountDeviceLVM)(unsafe.Pointer(in.LVM)) + out.DeviceReference = (*ClientMountDeviceReference)(unsafe.Pointer(in.DeviceReference)) + return nil +} + +// Convert_v1alpha3_ClientMountDevice_To_v1alpha2_ClientMountDevice is an autogenerated conversion function. 
+func Convert_v1alpha3_ClientMountDevice_To_v1alpha2_ClientMountDevice(in *v1alpha3.ClientMountDevice, out *ClientMountDevice, s conversion.Scope) error { + return autoConvert_v1alpha3_ClientMountDevice_To_v1alpha2_ClientMountDevice(in, out, s) +} + +func autoConvert_v1alpha2_ClientMountDeviceLVM_To_v1alpha3_ClientMountDeviceLVM(in *ClientMountDeviceLVM, out *v1alpha3.ClientMountDeviceLVM, s conversion.Scope) error { + out.DeviceType = v1alpha3.ClientMountLVMDeviceType(in.DeviceType) + out.NVMeInfo = *(*[]v1alpha3.ClientMountNVMeDesc)(unsafe.Pointer(&in.NVMeInfo)) + out.VolumeGroup = in.VolumeGroup + out.LogicalVolume = in.LogicalVolume + return nil +} + +// Convert_v1alpha2_ClientMountDeviceLVM_To_v1alpha3_ClientMountDeviceLVM is an autogenerated conversion function. +func Convert_v1alpha2_ClientMountDeviceLVM_To_v1alpha3_ClientMountDeviceLVM(in *ClientMountDeviceLVM, out *v1alpha3.ClientMountDeviceLVM, s conversion.Scope) error { + return autoConvert_v1alpha2_ClientMountDeviceLVM_To_v1alpha3_ClientMountDeviceLVM(in, out, s) +} + +func autoConvert_v1alpha3_ClientMountDeviceLVM_To_v1alpha2_ClientMountDeviceLVM(in *v1alpha3.ClientMountDeviceLVM, out *ClientMountDeviceLVM, s conversion.Scope) error { + out.DeviceType = ClientMountLVMDeviceType(in.DeviceType) + out.NVMeInfo = *(*[]ClientMountNVMeDesc)(unsafe.Pointer(&in.NVMeInfo)) + out.VolumeGroup = in.VolumeGroup + out.LogicalVolume = in.LogicalVolume + return nil +} + +// Convert_v1alpha3_ClientMountDeviceLVM_To_v1alpha2_ClientMountDeviceLVM is an autogenerated conversion function. 
+func Convert_v1alpha3_ClientMountDeviceLVM_To_v1alpha2_ClientMountDeviceLVM(in *v1alpha3.ClientMountDeviceLVM, out *ClientMountDeviceLVM, s conversion.Scope) error { + return autoConvert_v1alpha3_ClientMountDeviceLVM_To_v1alpha2_ClientMountDeviceLVM(in, out, s) +} + +func autoConvert_v1alpha2_ClientMountDeviceLustre_To_v1alpha3_ClientMountDeviceLustre(in *ClientMountDeviceLustre, out *v1alpha3.ClientMountDeviceLustre, s conversion.Scope) error { + out.FileSystemName = in.FileSystemName + out.MgsAddresses = in.MgsAddresses + return nil +} + +// Convert_v1alpha2_ClientMountDeviceLustre_To_v1alpha3_ClientMountDeviceLustre is an autogenerated conversion function. +func Convert_v1alpha2_ClientMountDeviceLustre_To_v1alpha3_ClientMountDeviceLustre(in *ClientMountDeviceLustre, out *v1alpha3.ClientMountDeviceLustre, s conversion.Scope) error { + return autoConvert_v1alpha2_ClientMountDeviceLustre_To_v1alpha3_ClientMountDeviceLustre(in, out, s) +} + +func autoConvert_v1alpha3_ClientMountDeviceLustre_To_v1alpha2_ClientMountDeviceLustre(in *v1alpha3.ClientMountDeviceLustre, out *ClientMountDeviceLustre, s conversion.Scope) error { + out.FileSystemName = in.FileSystemName + out.MgsAddresses = in.MgsAddresses + return nil +} + +// Convert_v1alpha3_ClientMountDeviceLustre_To_v1alpha2_ClientMountDeviceLustre is an autogenerated conversion function. 
+func Convert_v1alpha3_ClientMountDeviceLustre_To_v1alpha2_ClientMountDeviceLustre(in *v1alpha3.ClientMountDeviceLustre, out *ClientMountDeviceLustre, s conversion.Scope) error { + return autoConvert_v1alpha3_ClientMountDeviceLustre_To_v1alpha2_ClientMountDeviceLustre(in, out, s) +} + +func autoConvert_v1alpha2_ClientMountDeviceReference_To_v1alpha3_ClientMountDeviceReference(in *ClientMountDeviceReference, out *v1alpha3.ClientMountDeviceReference, s conversion.Scope) error { + out.ObjectReference = in.ObjectReference + out.Data = in.Data + return nil +} + +// Convert_v1alpha2_ClientMountDeviceReference_To_v1alpha3_ClientMountDeviceReference is an autogenerated conversion function. +func Convert_v1alpha2_ClientMountDeviceReference_To_v1alpha3_ClientMountDeviceReference(in *ClientMountDeviceReference, out *v1alpha3.ClientMountDeviceReference, s conversion.Scope) error { + return autoConvert_v1alpha2_ClientMountDeviceReference_To_v1alpha3_ClientMountDeviceReference(in, out, s) +} + +func autoConvert_v1alpha3_ClientMountDeviceReference_To_v1alpha2_ClientMountDeviceReference(in *v1alpha3.ClientMountDeviceReference, out *ClientMountDeviceReference, s conversion.Scope) error { + out.ObjectReference = in.ObjectReference + out.Data = in.Data + return nil +} + +// Convert_v1alpha3_ClientMountDeviceReference_To_v1alpha2_ClientMountDeviceReference is an autogenerated conversion function. 
+func Convert_v1alpha3_ClientMountDeviceReference_To_v1alpha2_ClientMountDeviceReference(in *v1alpha3.ClientMountDeviceReference, out *ClientMountDeviceReference, s conversion.Scope) error { + return autoConvert_v1alpha3_ClientMountDeviceReference_To_v1alpha2_ClientMountDeviceReference(in, out, s) +} + +func autoConvert_v1alpha2_ClientMountInfo_To_v1alpha3_ClientMountInfo(in *ClientMountInfo, out *v1alpha3.ClientMountInfo, s conversion.Scope) error { + out.MountPath = in.MountPath + out.UserID = in.UserID + out.GroupID = in.GroupID + out.SetPermissions = in.SetPermissions + out.Options = in.Options + if err := Convert_v1alpha2_ClientMountDevice_To_v1alpha3_ClientMountDevice(&in.Device, &out.Device, s); err != nil { + return err + } + out.Type = in.Type + out.TargetType = in.TargetType + out.Compute = in.Compute + return nil +} + +// Convert_v1alpha2_ClientMountInfo_To_v1alpha3_ClientMountInfo is an autogenerated conversion function. +func Convert_v1alpha2_ClientMountInfo_To_v1alpha3_ClientMountInfo(in *ClientMountInfo, out *v1alpha3.ClientMountInfo, s conversion.Scope) error { + return autoConvert_v1alpha2_ClientMountInfo_To_v1alpha3_ClientMountInfo(in, out, s) +} + +func autoConvert_v1alpha3_ClientMountInfo_To_v1alpha2_ClientMountInfo(in *v1alpha3.ClientMountInfo, out *ClientMountInfo, s conversion.Scope) error { + out.MountPath = in.MountPath + out.UserID = in.UserID + out.GroupID = in.GroupID + out.SetPermissions = in.SetPermissions + out.Options = in.Options + if err := Convert_v1alpha3_ClientMountDevice_To_v1alpha2_ClientMountDevice(&in.Device, &out.Device, s); err != nil { + return err + } + out.Type = in.Type + out.TargetType = in.TargetType + out.Compute = in.Compute + return nil +} + +// Convert_v1alpha3_ClientMountInfo_To_v1alpha2_ClientMountInfo is an autogenerated conversion function. 
+func Convert_v1alpha3_ClientMountInfo_To_v1alpha2_ClientMountInfo(in *v1alpha3.ClientMountInfo, out *ClientMountInfo, s conversion.Scope) error { + return autoConvert_v1alpha3_ClientMountInfo_To_v1alpha2_ClientMountInfo(in, out, s) +} + +func autoConvert_v1alpha2_ClientMountInfoStatus_To_v1alpha3_ClientMountInfoStatus(in *ClientMountInfoStatus, out *v1alpha3.ClientMountInfoStatus, s conversion.Scope) error { + out.State = v1alpha3.ClientMountState(in.State) + out.Ready = in.Ready + return nil +} + +// Convert_v1alpha2_ClientMountInfoStatus_To_v1alpha3_ClientMountInfoStatus is an autogenerated conversion function. +func Convert_v1alpha2_ClientMountInfoStatus_To_v1alpha3_ClientMountInfoStatus(in *ClientMountInfoStatus, out *v1alpha3.ClientMountInfoStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_ClientMountInfoStatus_To_v1alpha3_ClientMountInfoStatus(in, out, s) +} + +func autoConvert_v1alpha3_ClientMountInfoStatus_To_v1alpha2_ClientMountInfoStatus(in *v1alpha3.ClientMountInfoStatus, out *ClientMountInfoStatus, s conversion.Scope) error { + out.State = ClientMountState(in.State) + out.Ready = in.Ready + return nil +} + +// Convert_v1alpha3_ClientMountInfoStatus_To_v1alpha2_ClientMountInfoStatus is an autogenerated conversion function. +func Convert_v1alpha3_ClientMountInfoStatus_To_v1alpha2_ClientMountInfoStatus(in *v1alpha3.ClientMountInfoStatus, out *ClientMountInfoStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_ClientMountInfoStatus_To_v1alpha2_ClientMountInfoStatus(in, out, s) +} + +func autoConvert_v1alpha2_ClientMountList_To_v1alpha3_ClientMountList(in *ClientMountList, out *v1alpha3.ClientMountList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha3.ClientMount)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha2_ClientMountList_To_v1alpha3_ClientMountList is an autogenerated conversion function. 
+func Convert_v1alpha2_ClientMountList_To_v1alpha3_ClientMountList(in *ClientMountList, out *v1alpha3.ClientMountList, s conversion.Scope) error { + return autoConvert_v1alpha2_ClientMountList_To_v1alpha3_ClientMountList(in, out, s) +} + +func autoConvert_v1alpha3_ClientMountList_To_v1alpha2_ClientMountList(in *v1alpha3.ClientMountList, out *ClientMountList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]ClientMount)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha3_ClientMountList_To_v1alpha2_ClientMountList is an autogenerated conversion function. +func Convert_v1alpha3_ClientMountList_To_v1alpha2_ClientMountList(in *v1alpha3.ClientMountList, out *ClientMountList, s conversion.Scope) error { + return autoConvert_v1alpha3_ClientMountList_To_v1alpha2_ClientMountList(in, out, s) +} + +func autoConvert_v1alpha2_ClientMountNVMeDesc_To_v1alpha3_ClientMountNVMeDesc(in *ClientMountNVMeDesc, out *v1alpha3.ClientMountNVMeDesc, s conversion.Scope) error { + out.DeviceSerial = in.DeviceSerial + out.NamespaceID = in.NamespaceID + out.NamespaceGUID = in.NamespaceGUID + return nil +} + +// Convert_v1alpha2_ClientMountNVMeDesc_To_v1alpha3_ClientMountNVMeDesc is an autogenerated conversion function. +func Convert_v1alpha2_ClientMountNVMeDesc_To_v1alpha3_ClientMountNVMeDesc(in *ClientMountNVMeDesc, out *v1alpha3.ClientMountNVMeDesc, s conversion.Scope) error { + return autoConvert_v1alpha2_ClientMountNVMeDesc_To_v1alpha3_ClientMountNVMeDesc(in, out, s) +} + +func autoConvert_v1alpha3_ClientMountNVMeDesc_To_v1alpha2_ClientMountNVMeDesc(in *v1alpha3.ClientMountNVMeDesc, out *ClientMountNVMeDesc, s conversion.Scope) error { + out.DeviceSerial = in.DeviceSerial + out.NamespaceID = in.NamespaceID + out.NamespaceGUID = in.NamespaceGUID + return nil +} + +// Convert_v1alpha3_ClientMountNVMeDesc_To_v1alpha2_ClientMountNVMeDesc is an autogenerated conversion function. 
+func Convert_v1alpha3_ClientMountNVMeDesc_To_v1alpha2_ClientMountNVMeDesc(in *v1alpha3.ClientMountNVMeDesc, out *ClientMountNVMeDesc, s conversion.Scope) error { + return autoConvert_v1alpha3_ClientMountNVMeDesc_To_v1alpha2_ClientMountNVMeDesc(in, out, s) +} + +func autoConvert_v1alpha2_ClientMountSpec_To_v1alpha3_ClientMountSpec(in *ClientMountSpec, out *v1alpha3.ClientMountSpec, s conversion.Scope) error { + out.Node = in.Node + out.DesiredState = v1alpha3.ClientMountState(in.DesiredState) + out.Mounts = *(*[]v1alpha3.ClientMountInfo)(unsafe.Pointer(&in.Mounts)) + return nil +} + +// Convert_v1alpha2_ClientMountSpec_To_v1alpha3_ClientMountSpec is an autogenerated conversion function. +func Convert_v1alpha2_ClientMountSpec_To_v1alpha3_ClientMountSpec(in *ClientMountSpec, out *v1alpha3.ClientMountSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_ClientMountSpec_To_v1alpha3_ClientMountSpec(in, out, s) +} + +func autoConvert_v1alpha3_ClientMountSpec_To_v1alpha2_ClientMountSpec(in *v1alpha3.ClientMountSpec, out *ClientMountSpec, s conversion.Scope) error { + out.Node = in.Node + out.DesiredState = ClientMountState(in.DesiredState) + out.Mounts = *(*[]ClientMountInfo)(unsafe.Pointer(&in.Mounts)) + return nil +} + +// Convert_v1alpha3_ClientMountSpec_To_v1alpha2_ClientMountSpec is an autogenerated conversion function. 
+func Convert_v1alpha3_ClientMountSpec_To_v1alpha2_ClientMountSpec(in *v1alpha3.ClientMountSpec, out *ClientMountSpec, s conversion.Scope) error { + return autoConvert_v1alpha3_ClientMountSpec_To_v1alpha2_ClientMountSpec(in, out, s) +} + +func autoConvert_v1alpha2_ClientMountStatus_To_v1alpha3_ClientMountStatus(in *ClientMountStatus, out *v1alpha3.ClientMountStatus, s conversion.Scope) error { + out.Mounts = *(*[]v1alpha3.ClientMountInfoStatus)(unsafe.Pointer(&in.Mounts)) + out.AllReady = in.AllReady + if err := Convert_v1alpha2_ResourceError_To_v1alpha3_ResourceError(&in.ResourceError, &out.ResourceError, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha2_ClientMountStatus_To_v1alpha3_ClientMountStatus is an autogenerated conversion function. +func Convert_v1alpha2_ClientMountStatus_To_v1alpha3_ClientMountStatus(in *ClientMountStatus, out *v1alpha3.ClientMountStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_ClientMountStatus_To_v1alpha3_ClientMountStatus(in, out, s) +} + +func autoConvert_v1alpha3_ClientMountStatus_To_v1alpha2_ClientMountStatus(in *v1alpha3.ClientMountStatus, out *ClientMountStatus, s conversion.Scope) error { + out.Mounts = *(*[]ClientMountInfoStatus)(unsafe.Pointer(&in.Mounts)) + out.AllReady = in.AllReady + if err := Convert_v1alpha3_ResourceError_To_v1alpha2_ResourceError(&in.ResourceError, &out.ResourceError, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha3_ClientMountStatus_To_v1alpha2_ClientMountStatus is an autogenerated conversion function. 
+func Convert_v1alpha3_ClientMountStatus_To_v1alpha2_ClientMountStatus(in *v1alpha3.ClientMountStatus, out *ClientMountStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_ClientMountStatus_To_v1alpha2_ClientMountStatus(in, out, s) +} + +func autoConvert_v1alpha2_ComputeBreakdown_To_v1alpha3_ComputeBreakdown(in *ComputeBreakdown, out *v1alpha3.ComputeBreakdown, s conversion.Scope) error { + if err := Convert_v1alpha2_ComputeConstraints_To_v1alpha3_ComputeConstraints(&in.Constraints, &out.Constraints, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha2_ComputeBreakdown_To_v1alpha3_ComputeBreakdown is an autogenerated conversion function. +func Convert_v1alpha2_ComputeBreakdown_To_v1alpha3_ComputeBreakdown(in *ComputeBreakdown, out *v1alpha3.ComputeBreakdown, s conversion.Scope) error { + return autoConvert_v1alpha2_ComputeBreakdown_To_v1alpha3_ComputeBreakdown(in, out, s) +} + +func autoConvert_v1alpha3_ComputeBreakdown_To_v1alpha2_ComputeBreakdown(in *v1alpha3.ComputeBreakdown, out *ComputeBreakdown, s conversion.Scope) error { + if err := Convert_v1alpha3_ComputeConstraints_To_v1alpha2_ComputeConstraints(&in.Constraints, &out.Constraints, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha3_ComputeBreakdown_To_v1alpha2_ComputeBreakdown is an autogenerated conversion function. +func Convert_v1alpha3_ComputeBreakdown_To_v1alpha2_ComputeBreakdown(in *v1alpha3.ComputeBreakdown, out *ComputeBreakdown, s conversion.Scope) error { + return autoConvert_v1alpha3_ComputeBreakdown_To_v1alpha2_ComputeBreakdown(in, out, s) +} + +func autoConvert_v1alpha2_ComputeConstraints_To_v1alpha3_ComputeConstraints(in *ComputeConstraints, out *v1alpha3.ComputeConstraints, s conversion.Scope) error { + out.Location = *(*[]v1alpha3.ComputeLocationConstraint)(unsafe.Pointer(&in.Location)) + return nil +} + +// Convert_v1alpha2_ComputeConstraints_To_v1alpha3_ComputeConstraints is an autogenerated conversion function. 
+func Convert_v1alpha2_ComputeConstraints_To_v1alpha3_ComputeConstraints(in *ComputeConstraints, out *v1alpha3.ComputeConstraints, s conversion.Scope) error { + return autoConvert_v1alpha2_ComputeConstraints_To_v1alpha3_ComputeConstraints(in, out, s) +} + +func autoConvert_v1alpha3_ComputeConstraints_To_v1alpha2_ComputeConstraints(in *v1alpha3.ComputeConstraints, out *ComputeConstraints, s conversion.Scope) error { + out.Location = *(*[]ComputeLocationConstraint)(unsafe.Pointer(&in.Location)) + return nil +} + +// Convert_v1alpha3_ComputeConstraints_To_v1alpha2_ComputeConstraints is an autogenerated conversion function. +func Convert_v1alpha3_ComputeConstraints_To_v1alpha2_ComputeConstraints(in *v1alpha3.ComputeConstraints, out *ComputeConstraints, s conversion.Scope) error { + return autoConvert_v1alpha3_ComputeConstraints_To_v1alpha2_ComputeConstraints(in, out, s) +} + +func autoConvert_v1alpha2_ComputeLocationAccess_To_v1alpha3_ComputeLocationAccess(in *ComputeLocationAccess, out *v1alpha3.ComputeLocationAccess, s conversion.Scope) error { + out.Type = v1alpha3.ComputeLocationType(in.Type) + out.Priority = v1alpha3.ComputeLocationPriority(in.Priority) + return nil +} + +// Convert_v1alpha2_ComputeLocationAccess_To_v1alpha3_ComputeLocationAccess is an autogenerated conversion function. +func Convert_v1alpha2_ComputeLocationAccess_To_v1alpha3_ComputeLocationAccess(in *ComputeLocationAccess, out *v1alpha3.ComputeLocationAccess, s conversion.Scope) error { + return autoConvert_v1alpha2_ComputeLocationAccess_To_v1alpha3_ComputeLocationAccess(in, out, s) +} + +func autoConvert_v1alpha3_ComputeLocationAccess_To_v1alpha2_ComputeLocationAccess(in *v1alpha3.ComputeLocationAccess, out *ComputeLocationAccess, s conversion.Scope) error { + out.Type = ComputeLocationType(in.Type) + out.Priority = ComputeLocationPriority(in.Priority) + return nil +} + +// Convert_v1alpha3_ComputeLocationAccess_To_v1alpha2_ComputeLocationAccess is an autogenerated conversion function. 
+func Convert_v1alpha3_ComputeLocationAccess_To_v1alpha2_ComputeLocationAccess(in *v1alpha3.ComputeLocationAccess, out *ComputeLocationAccess, s conversion.Scope) error { + return autoConvert_v1alpha3_ComputeLocationAccess_To_v1alpha2_ComputeLocationAccess(in, out, s) +} + +func autoConvert_v1alpha2_ComputeLocationConstraint_To_v1alpha3_ComputeLocationConstraint(in *ComputeLocationConstraint, out *v1alpha3.ComputeLocationConstraint, s conversion.Scope) error { + out.Access = *(*[]v1alpha3.ComputeLocationAccess)(unsafe.Pointer(&in.Access)) + out.Reference = in.Reference + return nil +} + +// Convert_v1alpha2_ComputeLocationConstraint_To_v1alpha3_ComputeLocationConstraint is an autogenerated conversion function. +func Convert_v1alpha2_ComputeLocationConstraint_To_v1alpha3_ComputeLocationConstraint(in *ComputeLocationConstraint, out *v1alpha3.ComputeLocationConstraint, s conversion.Scope) error { + return autoConvert_v1alpha2_ComputeLocationConstraint_To_v1alpha3_ComputeLocationConstraint(in, out, s) +} + +func autoConvert_v1alpha3_ComputeLocationConstraint_To_v1alpha2_ComputeLocationConstraint(in *v1alpha3.ComputeLocationConstraint, out *ComputeLocationConstraint, s conversion.Scope) error { + out.Access = *(*[]ComputeLocationAccess)(unsafe.Pointer(&in.Access)) + out.Reference = in.Reference + return nil +} + +// Convert_v1alpha3_ComputeLocationConstraint_To_v1alpha2_ComputeLocationConstraint is an autogenerated conversion function. 
+func Convert_v1alpha3_ComputeLocationConstraint_To_v1alpha2_ComputeLocationConstraint(in *v1alpha3.ComputeLocationConstraint, out *ComputeLocationConstraint, s conversion.Scope) error { + return autoConvert_v1alpha3_ComputeLocationConstraint_To_v1alpha2_ComputeLocationConstraint(in, out, s) +} + +func autoConvert_v1alpha2_Computes_To_v1alpha3_Computes(in *Computes, out *v1alpha3.Computes, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*[]v1alpha3.ComputesData)(unsafe.Pointer(&in.Data)) + return nil +} + +// Convert_v1alpha2_Computes_To_v1alpha3_Computes is an autogenerated conversion function. +func Convert_v1alpha2_Computes_To_v1alpha3_Computes(in *Computes, out *v1alpha3.Computes, s conversion.Scope) error { + return autoConvert_v1alpha2_Computes_To_v1alpha3_Computes(in, out, s) +} + +func autoConvert_v1alpha3_Computes_To_v1alpha2_Computes(in *v1alpha3.Computes, out *Computes, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*[]ComputesData)(unsafe.Pointer(&in.Data)) + return nil +} + +// Convert_v1alpha3_Computes_To_v1alpha2_Computes is an autogenerated conversion function. +func Convert_v1alpha3_Computes_To_v1alpha2_Computes(in *v1alpha3.Computes, out *Computes, s conversion.Scope) error { + return autoConvert_v1alpha3_Computes_To_v1alpha2_Computes(in, out, s) +} + +func autoConvert_v1alpha2_ComputesData_To_v1alpha3_ComputesData(in *ComputesData, out *v1alpha3.ComputesData, s conversion.Scope) error { + out.Name = in.Name + return nil +} + +// Convert_v1alpha2_ComputesData_To_v1alpha3_ComputesData is an autogenerated conversion function. 
+func Convert_v1alpha2_ComputesData_To_v1alpha3_ComputesData(in *ComputesData, out *v1alpha3.ComputesData, s conversion.Scope) error { + return autoConvert_v1alpha2_ComputesData_To_v1alpha3_ComputesData(in, out, s) +} + +func autoConvert_v1alpha3_ComputesData_To_v1alpha2_ComputesData(in *v1alpha3.ComputesData, out *ComputesData, s conversion.Scope) error { + out.Name = in.Name + return nil +} + +// Convert_v1alpha3_ComputesData_To_v1alpha2_ComputesData is an autogenerated conversion function. +func Convert_v1alpha3_ComputesData_To_v1alpha2_ComputesData(in *v1alpha3.ComputesData, out *ComputesData, s conversion.Scope) error { + return autoConvert_v1alpha3_ComputesData_To_v1alpha2_ComputesData(in, out, s) +} + +func autoConvert_v1alpha2_ComputesList_To_v1alpha3_ComputesList(in *ComputesList, out *v1alpha3.ComputesList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha3.Computes)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha2_ComputesList_To_v1alpha3_ComputesList is an autogenerated conversion function. +func Convert_v1alpha2_ComputesList_To_v1alpha3_ComputesList(in *ComputesList, out *v1alpha3.ComputesList, s conversion.Scope) error { + return autoConvert_v1alpha2_ComputesList_To_v1alpha3_ComputesList(in, out, s) +} + +func autoConvert_v1alpha3_ComputesList_To_v1alpha2_ComputesList(in *v1alpha3.ComputesList, out *ComputesList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]Computes)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha3_ComputesList_To_v1alpha2_ComputesList is an autogenerated conversion function. 
+func Convert_v1alpha3_ComputesList_To_v1alpha2_ComputesList(in *v1alpha3.ComputesList, out *ComputesList, s conversion.Scope) error { + return autoConvert_v1alpha3_ComputesList_To_v1alpha2_ComputesList(in, out, s) +} + +func autoConvert_v1alpha2_DWDirectiveRule_To_v1alpha3_DWDirectiveRule(in *DWDirectiveRule, out *v1alpha3.DWDirectiveRule, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Spec = *(*[]dwdparse.DWDirectiveRuleSpec)(unsafe.Pointer(&in.Spec)) + return nil +} + +// Convert_v1alpha2_DWDirectiveRule_To_v1alpha3_DWDirectiveRule is an autogenerated conversion function. +func Convert_v1alpha2_DWDirectiveRule_To_v1alpha3_DWDirectiveRule(in *DWDirectiveRule, out *v1alpha3.DWDirectiveRule, s conversion.Scope) error { + return autoConvert_v1alpha2_DWDirectiveRule_To_v1alpha3_DWDirectiveRule(in, out, s) +} + +func autoConvert_v1alpha3_DWDirectiveRule_To_v1alpha2_DWDirectiveRule(in *v1alpha3.DWDirectiveRule, out *DWDirectiveRule, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Spec = *(*[]dwdparse.DWDirectiveRuleSpec)(unsafe.Pointer(&in.Spec)) + return nil +} + +// Convert_v1alpha3_DWDirectiveRule_To_v1alpha2_DWDirectiveRule is an autogenerated conversion function. +func Convert_v1alpha3_DWDirectiveRule_To_v1alpha2_DWDirectiveRule(in *v1alpha3.DWDirectiveRule, out *DWDirectiveRule, s conversion.Scope) error { + return autoConvert_v1alpha3_DWDirectiveRule_To_v1alpha2_DWDirectiveRule(in, out, s) +} + +func autoConvert_v1alpha2_DWDirectiveRuleList_To_v1alpha3_DWDirectiveRuleList(in *DWDirectiveRuleList, out *v1alpha3.DWDirectiveRuleList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha3.DWDirectiveRule)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha2_DWDirectiveRuleList_To_v1alpha3_DWDirectiveRuleList is an autogenerated conversion function. 
+func Convert_v1alpha2_DWDirectiveRuleList_To_v1alpha3_DWDirectiveRuleList(in *DWDirectiveRuleList, out *v1alpha3.DWDirectiveRuleList, s conversion.Scope) error { + return autoConvert_v1alpha2_DWDirectiveRuleList_To_v1alpha3_DWDirectiveRuleList(in, out, s) +} + +func autoConvert_v1alpha3_DWDirectiveRuleList_To_v1alpha2_DWDirectiveRuleList(in *v1alpha3.DWDirectiveRuleList, out *DWDirectiveRuleList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]DWDirectiveRule)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha3_DWDirectiveRuleList_To_v1alpha2_DWDirectiveRuleList is an autogenerated conversion function. +func Convert_v1alpha3_DWDirectiveRuleList_To_v1alpha2_DWDirectiveRuleList(in *v1alpha3.DWDirectiveRuleList, out *DWDirectiveRuleList, s conversion.Scope) error { + return autoConvert_v1alpha3_DWDirectiveRuleList_To_v1alpha2_DWDirectiveRuleList(in, out, s) +} + +func autoConvert_v1alpha2_DirectiveBreakdown_To_v1alpha3_DirectiveBreakdown(in *DirectiveBreakdown, out *v1alpha3.DirectiveBreakdown, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha2_DirectiveBreakdownSpec_To_v1alpha3_DirectiveBreakdownSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha2_DirectiveBreakdownStatus_To_v1alpha3_DirectiveBreakdownStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha2_DirectiveBreakdown_To_v1alpha3_DirectiveBreakdown is an autogenerated conversion function. 
+func Convert_v1alpha2_DirectiveBreakdown_To_v1alpha3_DirectiveBreakdown(in *DirectiveBreakdown, out *v1alpha3.DirectiveBreakdown, s conversion.Scope) error { + return autoConvert_v1alpha2_DirectiveBreakdown_To_v1alpha3_DirectiveBreakdown(in, out, s) +} + +func autoConvert_v1alpha3_DirectiveBreakdown_To_v1alpha2_DirectiveBreakdown(in *v1alpha3.DirectiveBreakdown, out *DirectiveBreakdown, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha3_DirectiveBreakdownSpec_To_v1alpha2_DirectiveBreakdownSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha3_DirectiveBreakdownStatus_To_v1alpha2_DirectiveBreakdownStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha3_DirectiveBreakdown_To_v1alpha2_DirectiveBreakdown is an autogenerated conversion function. +func Convert_v1alpha3_DirectiveBreakdown_To_v1alpha2_DirectiveBreakdown(in *v1alpha3.DirectiveBreakdown, out *DirectiveBreakdown, s conversion.Scope) error { + return autoConvert_v1alpha3_DirectiveBreakdown_To_v1alpha2_DirectiveBreakdown(in, out, s) +} + +func autoConvert_v1alpha2_DirectiveBreakdownList_To_v1alpha3_DirectiveBreakdownList(in *DirectiveBreakdownList, out *v1alpha3.DirectiveBreakdownList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha3.DirectiveBreakdown)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha2_DirectiveBreakdownList_To_v1alpha3_DirectiveBreakdownList is an autogenerated conversion function. 
+func Convert_v1alpha2_DirectiveBreakdownList_To_v1alpha3_DirectiveBreakdownList(in *DirectiveBreakdownList, out *v1alpha3.DirectiveBreakdownList, s conversion.Scope) error { + return autoConvert_v1alpha2_DirectiveBreakdownList_To_v1alpha3_DirectiveBreakdownList(in, out, s) +} + +func autoConvert_v1alpha3_DirectiveBreakdownList_To_v1alpha2_DirectiveBreakdownList(in *v1alpha3.DirectiveBreakdownList, out *DirectiveBreakdownList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]DirectiveBreakdown)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha3_DirectiveBreakdownList_To_v1alpha2_DirectiveBreakdownList is an autogenerated conversion function. +func Convert_v1alpha3_DirectiveBreakdownList_To_v1alpha2_DirectiveBreakdownList(in *v1alpha3.DirectiveBreakdownList, out *DirectiveBreakdownList, s conversion.Scope) error { + return autoConvert_v1alpha3_DirectiveBreakdownList_To_v1alpha2_DirectiveBreakdownList(in, out, s) +} + +func autoConvert_v1alpha2_DirectiveBreakdownSpec_To_v1alpha3_DirectiveBreakdownSpec(in *DirectiveBreakdownSpec, out *v1alpha3.DirectiveBreakdownSpec, s conversion.Scope) error { + out.Directive = in.Directive + out.UserID = in.UserID + return nil +} + +// Convert_v1alpha2_DirectiveBreakdownSpec_To_v1alpha3_DirectiveBreakdownSpec is an autogenerated conversion function. +func Convert_v1alpha2_DirectiveBreakdownSpec_To_v1alpha3_DirectiveBreakdownSpec(in *DirectiveBreakdownSpec, out *v1alpha3.DirectiveBreakdownSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_DirectiveBreakdownSpec_To_v1alpha3_DirectiveBreakdownSpec(in, out, s) +} + +func autoConvert_v1alpha3_DirectiveBreakdownSpec_To_v1alpha2_DirectiveBreakdownSpec(in *v1alpha3.DirectiveBreakdownSpec, out *DirectiveBreakdownSpec, s conversion.Scope) error { + out.Directive = in.Directive + out.UserID = in.UserID + return nil +} + +// Convert_v1alpha3_DirectiveBreakdownSpec_To_v1alpha2_DirectiveBreakdownSpec is an autogenerated conversion function. 
+func Convert_v1alpha3_DirectiveBreakdownSpec_To_v1alpha2_DirectiveBreakdownSpec(in *v1alpha3.DirectiveBreakdownSpec, out *DirectiveBreakdownSpec, s conversion.Scope) error { + return autoConvert_v1alpha3_DirectiveBreakdownSpec_To_v1alpha2_DirectiveBreakdownSpec(in, out, s) +} + +func autoConvert_v1alpha2_DirectiveBreakdownStatus_To_v1alpha3_DirectiveBreakdownStatus(in *DirectiveBreakdownStatus, out *v1alpha3.DirectiveBreakdownStatus, s conversion.Scope) error { + out.Storage = (*v1alpha3.StorageBreakdown)(unsafe.Pointer(in.Storage)) + out.Compute = (*v1alpha3.ComputeBreakdown)(unsafe.Pointer(in.Compute)) + out.Ready = in.Ready + out.RequiredDaemons = *(*[]string)(unsafe.Pointer(&in.RequiredDaemons)) + if err := Convert_v1alpha2_ResourceError_To_v1alpha3_ResourceError(&in.ResourceError, &out.ResourceError, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha2_DirectiveBreakdownStatus_To_v1alpha3_DirectiveBreakdownStatus is an autogenerated conversion function. +func Convert_v1alpha2_DirectiveBreakdownStatus_To_v1alpha3_DirectiveBreakdownStatus(in *DirectiveBreakdownStatus, out *v1alpha3.DirectiveBreakdownStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_DirectiveBreakdownStatus_To_v1alpha3_DirectiveBreakdownStatus(in, out, s) +} + +func autoConvert_v1alpha3_DirectiveBreakdownStatus_To_v1alpha2_DirectiveBreakdownStatus(in *v1alpha3.DirectiveBreakdownStatus, out *DirectiveBreakdownStatus, s conversion.Scope) error { + out.Storage = (*StorageBreakdown)(unsafe.Pointer(in.Storage)) + out.Compute = (*ComputeBreakdown)(unsafe.Pointer(in.Compute)) + out.Ready = in.Ready + out.RequiredDaemons = *(*[]string)(unsafe.Pointer(&in.RequiredDaemons)) + if err := Convert_v1alpha3_ResourceError_To_v1alpha2_ResourceError(&in.ResourceError, &out.ResourceError, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha3_DirectiveBreakdownStatus_To_v1alpha2_DirectiveBreakdownStatus is an autogenerated conversion function. 
func Convert_v1alpha3_DirectiveBreakdownStatus_To_v1alpha2_DirectiveBreakdownStatus(in *v1alpha3.DirectiveBreakdownStatus, out *DirectiveBreakdownStatus, s conversion.Scope) error {
	return autoConvert_v1alpha3_DirectiveBreakdownStatus_To_v1alpha2_DirectiveBreakdownStatus(in, out, s)
}

// autoConvert_v1alpha2_Node_To_v1alpha3_Node converts a Node; Status is a simple
// string-typed enum and converts via a plain type cast.
func autoConvert_v1alpha2_Node_To_v1alpha3_Node(in *Node, out *v1alpha3.Node, s conversion.Scope) error {
	out.Name = in.Name
	out.Status = v1alpha3.ResourceStatus(in.Status)
	return nil
}

// Convert_v1alpha2_Node_To_v1alpha3_Node is an autogenerated conversion function.
func Convert_v1alpha2_Node_To_v1alpha3_Node(in *Node, out *v1alpha3.Node, s conversion.Scope) error {
	return autoConvert_v1alpha2_Node_To_v1alpha3_Node(in, out, s)
}

// autoConvert_v1alpha3_Node_To_v1alpha2_Node is the reverse of the Node conversion above.
func autoConvert_v1alpha3_Node_To_v1alpha2_Node(in *v1alpha3.Node, out *Node, s conversion.Scope) error {
	out.Name = in.Name
	out.Status = ResourceStatus(in.Status)
	return nil
}

// Convert_v1alpha3_Node_To_v1alpha2_Node is an autogenerated conversion function.
func Convert_v1alpha3_Node_To_v1alpha2_Node(in *v1alpha3.Node, out *Node, s conversion.Scope) error {
	return autoConvert_v1alpha3_Node_To_v1alpha2_Node(in, out, s)
}

// autoConvert_v1alpha2_PersistentStorageInstance_To_v1alpha3_PersistentStorageInstance converts the
// top-level object: ObjectMeta is shared, Spec and Status delegate to their conversion functions.
func autoConvert_v1alpha2_PersistentStorageInstance_To_v1alpha3_PersistentStorageInstance(in *PersistentStorageInstance, out *v1alpha3.PersistentStorageInstance, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_v1alpha2_PersistentStorageInstanceSpec_To_v1alpha3_PersistentStorageInstanceSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := Convert_v1alpha2_PersistentStorageInstanceStatus_To_v1alpha3_PersistentStorageInstanceStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1alpha2_PersistentStorageInstance_To_v1alpha3_PersistentStorageInstance is an autogenerated conversion function.
func Convert_v1alpha2_PersistentStorageInstance_To_v1alpha3_PersistentStorageInstance(in *PersistentStorageInstance, out *v1alpha3.PersistentStorageInstance, s conversion.Scope) error {
	return autoConvert_v1alpha2_PersistentStorageInstance_To_v1alpha3_PersistentStorageInstance(in, out, s)
}

func autoConvert_v1alpha3_PersistentStorageInstance_To_v1alpha2_PersistentStorageInstance(in *v1alpha3.PersistentStorageInstance, out *PersistentStorageInstance, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_v1alpha3_PersistentStorageInstanceSpec_To_v1alpha2_PersistentStorageInstanceSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := Convert_v1alpha3_PersistentStorageInstanceStatus_To_v1alpha2_PersistentStorageInstanceStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1alpha3_PersistentStorageInstance_To_v1alpha2_PersistentStorageInstance is an autogenerated conversion function.
func Convert_v1alpha3_PersistentStorageInstance_To_v1alpha2_PersistentStorageInstance(in *v1alpha3.PersistentStorageInstance, out *PersistentStorageInstance, s conversion.Scope) error {
	return autoConvert_v1alpha3_PersistentStorageInstance_To_v1alpha2_PersistentStorageInstance(in, out, s)
}

// autoConvert_v1alpha2_PersistentStorageInstanceList_To_v1alpha3_PersistentStorageInstanceList converts
// the list. Items are converted element by element (not via an unsafe cast) because the
// element type itself requires a per-field conversion function.
func autoConvert_v1alpha2_PersistentStorageInstanceList_To_v1alpha3_PersistentStorageInstanceList(in *PersistentStorageInstanceList, out *v1alpha3.PersistentStorageInstanceList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]v1alpha3.PersistentStorageInstance, len(*in))
		for i := range *in {
			if err := Convert_v1alpha2_PersistentStorageInstance_To_v1alpha3_PersistentStorageInstance(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}

// Convert_v1alpha2_PersistentStorageInstanceList_To_v1alpha3_PersistentStorageInstanceList is an autogenerated conversion function.
func Convert_v1alpha2_PersistentStorageInstanceList_To_v1alpha3_PersistentStorageInstanceList(in *PersistentStorageInstanceList, out *v1alpha3.PersistentStorageInstanceList, s conversion.Scope) error {
	return autoConvert_v1alpha2_PersistentStorageInstanceList_To_v1alpha3_PersistentStorageInstanceList(in, out, s)
}

func autoConvert_v1alpha3_PersistentStorageInstanceList_To_v1alpha2_PersistentStorageInstanceList(in *v1alpha3.PersistentStorageInstanceList, out *PersistentStorageInstanceList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]PersistentStorageInstance, len(*in))
		for i := range *in {
			if err := Convert_v1alpha3_PersistentStorageInstance_To_v1alpha2_PersistentStorageInstance(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}

// Convert_v1alpha3_PersistentStorageInstanceList_To_v1alpha2_PersistentStorageInstanceList is an autogenerated conversion function.
func Convert_v1alpha3_PersistentStorageInstanceList_To_v1alpha2_PersistentStorageInstanceList(in *v1alpha3.PersistentStorageInstanceList, out *PersistentStorageInstanceList, s conversion.Scope) error {
	return autoConvert_v1alpha3_PersistentStorageInstanceList_To_v1alpha2_PersistentStorageInstanceList(in, out, s)
}

// autoConvert_v1alpha2_PersistentStorageInstanceSpec_To_v1alpha3_PersistentStorageInstanceSpec converts
// the spec; ConsumerReferences is unsafe-cast since both sides use the same v1.ObjectReference slice.
func autoConvert_v1alpha2_PersistentStorageInstanceSpec_To_v1alpha3_PersistentStorageInstanceSpec(in *PersistentStorageInstanceSpec, out *v1alpha3.PersistentStorageInstanceSpec, s conversion.Scope) error {
	out.Name = in.Name
	out.FsType = in.FsType
	out.DWDirective = in.DWDirective
	out.UserID = in.UserID
	out.State = v1alpha3.PersistentStorageInstanceState(in.State)
	out.ConsumerReferences = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.ConsumerReferences))
	return nil
}

// Convert_v1alpha2_PersistentStorageInstanceSpec_To_v1alpha3_PersistentStorageInstanceSpec is an autogenerated conversion function.
func Convert_v1alpha2_PersistentStorageInstanceSpec_To_v1alpha3_PersistentStorageInstanceSpec(in *PersistentStorageInstanceSpec, out *v1alpha3.PersistentStorageInstanceSpec, s conversion.Scope) error {
	return autoConvert_v1alpha2_PersistentStorageInstanceSpec_To_v1alpha3_PersistentStorageInstanceSpec(in, out, s)
}

func autoConvert_v1alpha3_PersistentStorageInstanceSpec_To_v1alpha2_PersistentStorageInstanceSpec(in *v1alpha3.PersistentStorageInstanceSpec, out *PersistentStorageInstanceSpec, s conversion.Scope) error {
	out.Name = in.Name
	out.FsType = in.FsType
	out.DWDirective = in.DWDirective
	out.UserID = in.UserID
	out.State = PersistentStorageInstanceState(in.State)
	out.ConsumerReferences = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.ConsumerReferences))
	return nil
}

// Convert_v1alpha3_PersistentStorageInstanceSpec_To_v1alpha2_PersistentStorageInstanceSpec is an autogenerated conversion function.
func Convert_v1alpha3_PersistentStorageInstanceSpec_To_v1alpha2_PersistentStorageInstanceSpec(in *v1alpha3.PersistentStorageInstanceSpec, out *PersistentStorageInstanceSpec, s conversion.Scope) error {
	return autoConvert_v1alpha3_PersistentStorageInstanceSpec_To_v1alpha2_PersistentStorageInstanceSpec(in, out, s)
}

// autoConvert_v1alpha2_PersistentStorageInstanceStatus_To_v1alpha3_PersistentStorageInstanceStatus converts the
// status; ResourceError goes through its dedicated conversion function.
func autoConvert_v1alpha2_PersistentStorageInstanceStatus_To_v1alpha3_PersistentStorageInstanceStatus(in *PersistentStorageInstanceStatus, out *v1alpha3.PersistentStorageInstanceStatus, s conversion.Scope) error {
	out.Servers = in.Servers
	out.State = v1alpha3.PersistentStorageInstanceState(in.State)
	if err := Convert_v1alpha2_ResourceError_To_v1alpha3_ResourceError(&in.ResourceError, &out.ResourceError, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1alpha2_PersistentStorageInstanceStatus_To_v1alpha3_PersistentStorageInstanceStatus is an autogenerated conversion function.
func Convert_v1alpha2_PersistentStorageInstanceStatus_To_v1alpha3_PersistentStorageInstanceStatus(in *PersistentStorageInstanceStatus, out *v1alpha3.PersistentStorageInstanceStatus, s conversion.Scope) error {
	return autoConvert_v1alpha2_PersistentStorageInstanceStatus_To_v1alpha3_PersistentStorageInstanceStatus(in, out, s)
}

// autoConvert_v1alpha3_PersistentStorageInstanceStatus_To_v1alpha2_PersistentStorageInstanceStatus converts
// the status in the down direction. The v1alpha3 Ready field has no v1alpha2 peer (see WARNING
// below), which is why no public Convert_ wrapper is generated here — a manual conversion
// function is expected to be provided elsewhere.
func autoConvert_v1alpha3_PersistentStorageInstanceStatus_To_v1alpha2_PersistentStorageInstanceStatus(in *v1alpha3.PersistentStorageInstanceStatus, out *PersistentStorageInstanceStatus, s conversion.Scope) error {
	out.Servers = in.Servers
	out.State = PersistentStorageInstanceState(in.State)
	// WARNING: in.Ready requires manual conversion: does not exist in peer-type
	if err := Convert_v1alpha3_ResourceError_To_v1alpha2_ResourceError(&in.ResourceError, &out.ResourceError, s); err != nil {
		return err
	}
	return nil
}

// autoConvert_v1alpha2_ResourceError_To_v1alpha3_ResourceError converts ResourceError; the embedded
// Error pointer is unsafe-cast, assuming ResourceErrorInfo has an identical layout in both versions.
func autoConvert_v1alpha2_ResourceError_To_v1alpha3_ResourceError(in *ResourceError, out *v1alpha3.ResourceError, s conversion.Scope) error {
	out.Error = (*v1alpha3.ResourceErrorInfo)(unsafe.Pointer(in.Error))
	return nil
}

// Convert_v1alpha2_ResourceError_To_v1alpha3_ResourceError is an autogenerated conversion function.
func Convert_v1alpha2_ResourceError_To_v1alpha3_ResourceError(in *ResourceError, out *v1alpha3.ResourceError, s conversion.Scope) error {
	return autoConvert_v1alpha2_ResourceError_To_v1alpha3_ResourceError(in, out, s)
}

func autoConvert_v1alpha3_ResourceError_To_v1alpha2_ResourceError(in *v1alpha3.ResourceError, out *ResourceError, s conversion.Scope) error {
	out.Error = (*ResourceErrorInfo)(unsafe.Pointer(in.Error))
	return nil
}

// Convert_v1alpha3_ResourceError_To_v1alpha2_ResourceError is an autogenerated conversion function.
func Convert_v1alpha3_ResourceError_To_v1alpha2_ResourceError(in *v1alpha3.ResourceError, out *ResourceError, s conversion.Scope) error {
	return autoConvert_v1alpha3_ResourceError_To_v1alpha2_ResourceError(in, out, s)
}

// autoConvert_v1alpha2_ResourceErrorInfo_To_v1alpha3_ResourceErrorInfo converts the error detail
// struct; Type and Severity are string-typed enums converted with plain casts.
func autoConvert_v1alpha2_ResourceErrorInfo_To_v1alpha3_ResourceErrorInfo(in *ResourceErrorInfo, out *v1alpha3.ResourceErrorInfo, s conversion.Scope) error {
	out.UserMessage = in.UserMessage
	out.DebugMessage = in.DebugMessage
	out.Type = v1alpha3.ResourceErrorType(in.Type)
	out.Severity = v1alpha3.ResourceErrorSeverity(in.Severity)
	return nil
}

// Convert_v1alpha2_ResourceErrorInfo_To_v1alpha3_ResourceErrorInfo is an autogenerated conversion function.
func Convert_v1alpha2_ResourceErrorInfo_To_v1alpha3_ResourceErrorInfo(in *ResourceErrorInfo, out *v1alpha3.ResourceErrorInfo, s conversion.Scope) error {
	return autoConvert_v1alpha2_ResourceErrorInfo_To_v1alpha3_ResourceErrorInfo(in, out, s)
}

func autoConvert_v1alpha3_ResourceErrorInfo_To_v1alpha2_ResourceErrorInfo(in *v1alpha3.ResourceErrorInfo, out *ResourceErrorInfo, s conversion.Scope) error {
	out.UserMessage = in.UserMessage
	out.DebugMessage = in.DebugMessage
	out.Type = ResourceErrorType(in.Type)
	out.Severity = ResourceErrorSeverity(in.Severity)
	return nil
}

// Convert_v1alpha3_ResourceErrorInfo_To_v1alpha2_ResourceErrorInfo is an autogenerated conversion function.
func Convert_v1alpha3_ResourceErrorInfo_To_v1alpha2_ResourceErrorInfo(in *v1alpha3.ResourceErrorInfo, out *ResourceErrorInfo, s conversion.Scope) error {
	return autoConvert_v1alpha3_ResourceErrorInfo_To_v1alpha2_ResourceErrorInfo(in, out, s)
}

// autoConvert_v1alpha2_Servers_To_v1alpha3_Servers converts the top-level Servers object:
// ObjectMeta is shared, Spec and Status delegate to their conversion functions.
func autoConvert_v1alpha2_Servers_To_v1alpha3_Servers(in *Servers, out *v1alpha3.Servers, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_v1alpha2_ServersSpec_To_v1alpha3_ServersSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := Convert_v1alpha2_ServersStatus_To_v1alpha3_ServersStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1alpha2_Servers_To_v1alpha3_Servers is an autogenerated conversion function.
func Convert_v1alpha2_Servers_To_v1alpha3_Servers(in *Servers, out *v1alpha3.Servers, s conversion.Scope) error {
	return autoConvert_v1alpha2_Servers_To_v1alpha3_Servers(in, out, s)
}

func autoConvert_v1alpha3_Servers_To_v1alpha2_Servers(in *v1alpha3.Servers, out *Servers, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_v1alpha3_ServersSpec_To_v1alpha2_ServersSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := Convert_v1alpha3_ServersStatus_To_v1alpha2_ServersStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1alpha3_Servers_To_v1alpha2_Servers is an autogenerated conversion function.
func Convert_v1alpha3_Servers_To_v1alpha2_Servers(in *v1alpha3.Servers, out *Servers, s conversion.Scope) error {
	return autoConvert_v1alpha3_Servers_To_v1alpha2_Servers(in, out, s)
}

// autoConvert_v1alpha2_ServersList_To_v1alpha3_ServersList converts the list; the Items slice is
// unsafe-cast, which assumes the two Servers element types share an identical memory layout.
func autoConvert_v1alpha2_ServersList_To_v1alpha3_ServersList(in *ServersList, out *v1alpha3.ServersList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	out.Items = *(*[]v1alpha3.Servers)(unsafe.Pointer(&in.Items))
	return nil
}

// Convert_v1alpha2_ServersList_To_v1alpha3_ServersList is an autogenerated conversion function.
func Convert_v1alpha2_ServersList_To_v1alpha3_ServersList(in *ServersList, out *v1alpha3.ServersList, s conversion.Scope) error {
	return autoConvert_v1alpha2_ServersList_To_v1alpha3_ServersList(in, out, s)
}

func autoConvert_v1alpha3_ServersList_To_v1alpha2_ServersList(in *v1alpha3.ServersList, out *ServersList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	out.Items = *(*[]Servers)(unsafe.Pointer(&in.Items))
	return nil
}

// Convert_v1alpha3_ServersList_To_v1alpha2_ServersList is an autogenerated conversion function.
func Convert_v1alpha3_ServersList_To_v1alpha2_ServersList(in *v1alpha3.ServersList, out *ServersList, s conversion.Scope) error {
	return autoConvert_v1alpha3_ServersList_To_v1alpha2_ServersList(in, out, s)
}

func autoConvert_v1alpha2_ServersSpec_To_v1alpha3_ServersSpec(in *ServersSpec, out *v1alpha3.ServersSpec, s conversion.Scope) error {
	out.AllocationSets = *(*[]v1alpha3.ServersSpecAllocationSet)(unsafe.Pointer(&in.AllocationSets))
	return nil
}

// Convert_v1alpha2_ServersSpec_To_v1alpha3_ServersSpec is an autogenerated conversion function.
func Convert_v1alpha2_ServersSpec_To_v1alpha3_ServersSpec(in *ServersSpec, out *v1alpha3.ServersSpec, s conversion.Scope) error {
	return autoConvert_v1alpha2_ServersSpec_To_v1alpha3_ServersSpec(in, out, s)
}

func autoConvert_v1alpha3_ServersSpec_To_v1alpha2_ServersSpec(in *v1alpha3.ServersSpec, out *ServersSpec, s conversion.Scope) error {
	out.AllocationSets = *(*[]ServersSpecAllocationSet)(unsafe.Pointer(&in.AllocationSets))
	return nil
}

// Convert_v1alpha3_ServersSpec_To_v1alpha2_ServersSpec is an autogenerated conversion function.
func Convert_v1alpha3_ServersSpec_To_v1alpha2_ServersSpec(in *v1alpha3.ServersSpec, out *ServersSpec, s conversion.Scope) error {
	return autoConvert_v1alpha3_ServersSpec_To_v1alpha2_ServersSpec(in, out, s)
}

// autoConvert_v1alpha2_ServersSpecAllocationSet_To_v1alpha3_ServersSpecAllocationSet converts an
// allocation set; the Storage slice is unsafe-cast under the usual identical-layout assumption.
func autoConvert_v1alpha2_ServersSpecAllocationSet_To_v1alpha3_ServersSpecAllocationSet(in *ServersSpecAllocationSet, out *v1alpha3.ServersSpecAllocationSet, s conversion.Scope) error {
	out.Label = in.Label
	out.AllocationSize = in.AllocationSize
	out.Storage = *(*[]v1alpha3.ServersSpecStorage)(unsafe.Pointer(&in.Storage))
	return nil
}

// Convert_v1alpha2_ServersSpecAllocationSet_To_v1alpha3_ServersSpecAllocationSet is an autogenerated conversion function.
func Convert_v1alpha2_ServersSpecAllocationSet_To_v1alpha3_ServersSpecAllocationSet(in *ServersSpecAllocationSet, out *v1alpha3.ServersSpecAllocationSet, s conversion.Scope) error {
	return autoConvert_v1alpha2_ServersSpecAllocationSet_To_v1alpha3_ServersSpecAllocationSet(in, out, s)
}

func autoConvert_v1alpha3_ServersSpecAllocationSet_To_v1alpha2_ServersSpecAllocationSet(in *v1alpha3.ServersSpecAllocationSet, out *ServersSpecAllocationSet, s conversion.Scope) error {
	out.Label = in.Label
	out.AllocationSize = in.AllocationSize
	out.Storage = *(*[]ServersSpecStorage)(unsafe.Pointer(&in.Storage))
	return nil
}

// Convert_v1alpha3_ServersSpecAllocationSet_To_v1alpha2_ServersSpecAllocationSet is an autogenerated conversion function.
func Convert_v1alpha3_ServersSpecAllocationSet_To_v1alpha2_ServersSpecAllocationSet(in *v1alpha3.ServersSpecAllocationSet, out *ServersSpecAllocationSet, s conversion.Scope) error {
	return autoConvert_v1alpha3_ServersSpecAllocationSet_To_v1alpha2_ServersSpecAllocationSet(in, out, s)
}

// autoConvert_v1alpha2_ServersSpecStorage_To_v1alpha3_ServersSpecStorage copies the two
// plain fields of a per-storage spec entry.
func autoConvert_v1alpha2_ServersSpecStorage_To_v1alpha3_ServersSpecStorage(in *ServersSpecStorage, out *v1alpha3.ServersSpecStorage, s conversion.Scope) error {
	out.Name = in.Name
	out.AllocationCount = in.AllocationCount
	return nil
}

// Convert_v1alpha2_ServersSpecStorage_To_v1alpha3_ServersSpecStorage is an autogenerated conversion function.
func Convert_v1alpha2_ServersSpecStorage_To_v1alpha3_ServersSpecStorage(in *ServersSpecStorage, out *v1alpha3.ServersSpecStorage, s conversion.Scope) error {
	return autoConvert_v1alpha2_ServersSpecStorage_To_v1alpha3_ServersSpecStorage(in, out, s)
}

func autoConvert_v1alpha3_ServersSpecStorage_To_v1alpha2_ServersSpecStorage(in *v1alpha3.ServersSpecStorage, out *ServersSpecStorage, s conversion.Scope) error {
	out.Name = in.Name
	out.AllocationCount = in.AllocationCount
	return nil
}

// Convert_v1alpha3_ServersSpecStorage_To_v1alpha2_ServersSpecStorage is an autogenerated conversion function.
func Convert_v1alpha3_ServersSpecStorage_To_v1alpha2_ServersSpecStorage(in *v1alpha3.ServersSpecStorage, out *ServersSpecStorage, s conversion.Scope) error {
	return autoConvert_v1alpha3_ServersSpecStorage_To_v1alpha2_ServersSpecStorage(in, out, s)
}

// autoConvert_v1alpha2_ServersStatus_To_v1alpha3_ServersStatus converts the status; LastUpdate and
// AllocationSets are unsafe-cast (identical-layout assumption), ResourceError uses its
// explicit conversion function.
func autoConvert_v1alpha2_ServersStatus_To_v1alpha3_ServersStatus(in *ServersStatus, out *v1alpha3.ServersStatus, s conversion.Scope) error {
	out.Ready = in.Ready
	out.LastUpdate = (*metav1.MicroTime)(unsafe.Pointer(in.LastUpdate))
	out.AllocationSets = *(*[]v1alpha3.ServersStatusAllocationSet)(unsafe.Pointer(&in.AllocationSets))
	if err := Convert_v1alpha2_ResourceError_To_v1alpha3_ResourceError(&in.ResourceError, &out.ResourceError, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1alpha2_ServersStatus_To_v1alpha3_ServersStatus is an autogenerated conversion function.
func Convert_v1alpha2_ServersStatus_To_v1alpha3_ServersStatus(in *ServersStatus, out *v1alpha3.ServersStatus, s conversion.Scope) error {
	return autoConvert_v1alpha2_ServersStatus_To_v1alpha3_ServersStatus(in, out, s)
}

func autoConvert_v1alpha3_ServersStatus_To_v1alpha2_ServersStatus(in *v1alpha3.ServersStatus, out *ServersStatus, s conversion.Scope) error {
	out.Ready = in.Ready
	out.LastUpdate = (*metav1.MicroTime)(unsafe.Pointer(in.LastUpdate))
	out.AllocationSets = *(*[]ServersStatusAllocationSet)(unsafe.Pointer(&in.AllocationSets))
	if err := Convert_v1alpha3_ResourceError_To_v1alpha2_ResourceError(&in.ResourceError, &out.ResourceError, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1alpha3_ServersStatus_To_v1alpha2_ServersStatus is an autogenerated conversion function.
func Convert_v1alpha3_ServersStatus_To_v1alpha2_ServersStatus(in *v1alpha3.ServersStatus, out *ServersStatus, s conversion.Scope) error {
	return autoConvert_v1alpha3_ServersStatus_To_v1alpha2_ServersStatus(in, out, s)
}

// autoConvert_v1alpha2_ServersStatusAllocationSet_To_v1alpha3_ServersStatusAllocationSet converts an
// allocation-set status; the Storage map is unsafe-cast under the identical-layout assumption.
func autoConvert_v1alpha2_ServersStatusAllocationSet_To_v1alpha3_ServersStatusAllocationSet(in *ServersStatusAllocationSet, out *v1alpha3.ServersStatusAllocationSet, s conversion.Scope) error {
	out.Label = in.Label
	out.Storage = *(*map[string]v1alpha3.ServersStatusStorage)(unsafe.Pointer(&in.Storage))
	return nil
}

// Convert_v1alpha2_ServersStatusAllocationSet_To_v1alpha3_ServersStatusAllocationSet is an autogenerated conversion function.
func Convert_v1alpha2_ServersStatusAllocationSet_To_v1alpha3_ServersStatusAllocationSet(in *ServersStatusAllocationSet, out *v1alpha3.ServersStatusAllocationSet, s conversion.Scope) error {
	return autoConvert_v1alpha2_ServersStatusAllocationSet_To_v1alpha3_ServersStatusAllocationSet(in, out, s)
}

func autoConvert_v1alpha3_ServersStatusAllocationSet_To_v1alpha2_ServersStatusAllocationSet(in *v1alpha3.ServersStatusAllocationSet, out *ServersStatusAllocationSet, s conversion.Scope) error {
	out.Label = in.Label
	out.Storage = *(*map[string]ServersStatusStorage)(unsafe.Pointer(&in.Storage))
	return nil
}

// Convert_v1alpha3_ServersStatusAllocationSet_To_v1alpha2_ServersStatusAllocationSet is an autogenerated conversion function.
func Convert_v1alpha3_ServersStatusAllocationSet_To_v1alpha2_ServersStatusAllocationSet(in *v1alpha3.ServersStatusAllocationSet, out *ServersStatusAllocationSet, s conversion.Scope) error {
	return autoConvert_v1alpha3_ServersStatusAllocationSet_To_v1alpha2_ServersStatusAllocationSet(in, out, s)
}

// autoConvert_v1alpha2_ServersStatusStorage_To_v1alpha3_ServersStatusStorage copies the single
// AllocationSize field of a per-storage status entry.
func autoConvert_v1alpha2_ServersStatusStorage_To_v1alpha3_ServersStatusStorage(in *ServersStatusStorage, out *v1alpha3.ServersStatusStorage, s conversion.Scope) error {
	out.AllocationSize = in.AllocationSize
	return nil
}

// Convert_v1alpha2_ServersStatusStorage_To_v1alpha3_ServersStatusStorage is an autogenerated conversion function.
func Convert_v1alpha2_ServersStatusStorage_To_v1alpha3_ServersStatusStorage(in *ServersStatusStorage, out *v1alpha3.ServersStatusStorage, s conversion.Scope) error {
	return autoConvert_v1alpha2_ServersStatusStorage_To_v1alpha3_ServersStatusStorage(in, out, s)
}

func autoConvert_v1alpha3_ServersStatusStorage_To_v1alpha2_ServersStatusStorage(in *v1alpha3.ServersStatusStorage, out *ServersStatusStorage, s conversion.Scope) error {
	out.AllocationSize = in.AllocationSize
	return nil
}

// Convert_v1alpha3_ServersStatusStorage_To_v1alpha2_ServersStatusStorage is an autogenerated conversion function.
func Convert_v1alpha3_ServersStatusStorage_To_v1alpha2_ServersStatusStorage(in *v1alpha3.ServersStatusStorage, out *ServersStatusStorage, s conversion.Scope) error {
	return autoConvert_v1alpha3_ServersStatusStorage_To_v1alpha2_ServersStatusStorage(in, out, s)
}

// autoConvert_v1alpha2_Storage_To_v1alpha3_Storage converts the top-level Storage object:
// ObjectMeta is shared, Spec and Status delegate to their conversion functions.
func autoConvert_v1alpha2_Storage_To_v1alpha3_Storage(in *Storage, out *v1alpha3.Storage, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_v1alpha2_StorageSpec_To_v1alpha3_StorageSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := Convert_v1alpha2_StorageStatus_To_v1alpha3_StorageStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1alpha2_Storage_To_v1alpha3_Storage is an autogenerated conversion function.
func Convert_v1alpha2_Storage_To_v1alpha3_Storage(in *Storage, out *v1alpha3.Storage, s conversion.Scope) error {
	return autoConvert_v1alpha2_Storage_To_v1alpha3_Storage(in, out, s)
}

func autoConvert_v1alpha3_Storage_To_v1alpha2_Storage(in *v1alpha3.Storage, out *Storage, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_v1alpha3_StorageSpec_To_v1alpha2_StorageSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := Convert_v1alpha3_StorageStatus_To_v1alpha2_StorageStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1alpha3_Storage_To_v1alpha2_Storage is an autogenerated conversion function.
func Convert_v1alpha3_Storage_To_v1alpha2_Storage(in *v1alpha3.Storage, out *Storage, s conversion.Scope) error {
	return autoConvert_v1alpha3_Storage_To_v1alpha2_Storage(in, out, s)
}

// autoConvert_v1alpha2_StorageAccess_To_v1alpha3_StorageAccess converts access information; the
// Servers and Computes node slices are unsafe-cast under the identical-layout assumption.
func autoConvert_v1alpha2_StorageAccess_To_v1alpha3_StorageAccess(in *StorageAccess, out *v1alpha3.StorageAccess, s conversion.Scope) error {
	out.Protocol = v1alpha3.StorageAccessProtocol(in.Protocol)
	out.Servers = *(*[]v1alpha3.Node)(unsafe.Pointer(&in.Servers))
	out.Computes = *(*[]v1alpha3.Node)(unsafe.Pointer(&in.Computes))
	return nil
}

// Convert_v1alpha2_StorageAccess_To_v1alpha3_StorageAccess is an autogenerated conversion function.
func Convert_v1alpha2_StorageAccess_To_v1alpha3_StorageAccess(in *StorageAccess, out *v1alpha3.StorageAccess, s conversion.Scope) error {
	return autoConvert_v1alpha2_StorageAccess_To_v1alpha3_StorageAccess(in, out, s)
}

func autoConvert_v1alpha3_StorageAccess_To_v1alpha2_StorageAccess(in *v1alpha3.StorageAccess, out *StorageAccess, s conversion.Scope) error {
	out.Protocol = StorageAccessProtocol(in.Protocol)
	out.Servers = *(*[]Node)(unsafe.Pointer(&in.Servers))
	out.Computes = *(*[]Node)(unsafe.Pointer(&in.Computes))
	return nil
}

// Convert_v1alpha3_StorageAccess_To_v1alpha2_StorageAccess is an autogenerated conversion function.
func Convert_v1alpha3_StorageAccess_To_v1alpha2_StorageAccess(in *v1alpha3.StorageAccess, out *StorageAccess, s conversion.Scope) error {
	return autoConvert_v1alpha3_StorageAccess_To_v1alpha2_StorageAccess(in, out, s)
}

// autoConvert_v1alpha2_StorageAllocationSet_To_v1alpha3_StorageAllocationSet converts an allocation
// set; Constraints delegates to the AllocationSetConstraints conversion function.
func autoConvert_v1alpha2_StorageAllocationSet_To_v1alpha3_StorageAllocationSet(in *StorageAllocationSet, out *v1alpha3.StorageAllocationSet, s conversion.Scope) error {
	out.AllocationStrategy = v1alpha3.AllocationStrategy(in.AllocationStrategy)
	out.MinimumCapacity = in.MinimumCapacity
	out.Label = in.Label
	if err := Convert_v1alpha2_AllocationSetConstraints_To_v1alpha3_AllocationSetConstraints(&in.Constraints, &out.Constraints, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1alpha2_StorageAllocationSet_To_v1alpha3_StorageAllocationSet is an autogenerated conversion function.
func Convert_v1alpha2_StorageAllocationSet_To_v1alpha3_StorageAllocationSet(in *StorageAllocationSet, out *v1alpha3.StorageAllocationSet, s conversion.Scope) error {
	return autoConvert_v1alpha2_StorageAllocationSet_To_v1alpha3_StorageAllocationSet(in, out, s)
}

func autoConvert_v1alpha3_StorageAllocationSet_To_v1alpha2_StorageAllocationSet(in *v1alpha3.StorageAllocationSet, out *StorageAllocationSet, s conversion.Scope) error {
	out.AllocationStrategy = AllocationStrategy(in.AllocationStrategy)
	out.MinimumCapacity = in.MinimumCapacity
	out.Label = in.Label
	if err := Convert_v1alpha3_AllocationSetConstraints_To_v1alpha2_AllocationSetConstraints(&in.Constraints, &out.Constraints, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1alpha3_StorageAllocationSet_To_v1alpha2_StorageAllocationSet is an autogenerated conversion function.
func Convert_v1alpha3_StorageAllocationSet_To_v1alpha2_StorageAllocationSet(in *v1alpha3.StorageAllocationSet, out *StorageAllocationSet, s conversion.Scope) error {
	return autoConvert_v1alpha3_StorageAllocationSet_To_v1alpha2_StorageAllocationSet(in, out, s)
}

// autoConvert_v1alpha2_StorageBreakdown_To_v1alpha3_StorageBreakdown converts a storage breakdown;
// AllocationSets is unsafe-cast under the identical-layout assumption.
func autoConvert_v1alpha2_StorageBreakdown_To_v1alpha3_StorageBreakdown(in *StorageBreakdown, out *v1alpha3.StorageBreakdown, s conversion.Scope) error {
	out.Lifetime = in.Lifetime
	out.Reference = in.Reference
	out.AllocationSets = *(*[]v1alpha3.StorageAllocationSet)(unsafe.Pointer(&in.AllocationSets))
	return nil
}

// Convert_v1alpha2_StorageBreakdown_To_v1alpha3_StorageBreakdown is an autogenerated conversion function.
func Convert_v1alpha2_StorageBreakdown_To_v1alpha3_StorageBreakdown(in *StorageBreakdown, out *v1alpha3.StorageBreakdown, s conversion.Scope) error {
	return autoConvert_v1alpha2_StorageBreakdown_To_v1alpha3_StorageBreakdown(in, out, s)
}

func autoConvert_v1alpha3_StorageBreakdown_To_v1alpha2_StorageBreakdown(in *v1alpha3.StorageBreakdown, out *StorageBreakdown, s conversion.Scope) error {
	out.Lifetime = in.Lifetime
	out.Reference = in.Reference
	out.AllocationSets = *(*[]StorageAllocationSet)(unsafe.Pointer(&in.AllocationSets))
	return nil
}

// Convert_v1alpha3_StorageBreakdown_To_v1alpha2_StorageBreakdown is an autogenerated conversion function.
func Convert_v1alpha3_StorageBreakdown_To_v1alpha2_StorageBreakdown(in *v1alpha3.StorageBreakdown, out *StorageBreakdown, s conversion.Scope) error {
	return autoConvert_v1alpha3_StorageBreakdown_To_v1alpha2_StorageBreakdown(in, out, s)
}

// autoConvert_v1alpha2_StorageDevice_To_v1alpha3_StorageDevice converts a device record;
// the WearLevel pointer is unsafe-cast (both sides are *int64).
func autoConvert_v1alpha2_StorageDevice_To_v1alpha3_StorageDevice(in *StorageDevice, out *v1alpha3.StorageDevice, s conversion.Scope) error {
	out.Model = in.Model
	out.SerialNumber = in.SerialNumber
	out.FirmwareVersion = in.FirmwareVersion
	out.Slot = in.Slot
	out.Capacity = in.Capacity
	out.WearLevel = (*int64)(unsafe.Pointer(in.WearLevel))
	out.Status = v1alpha3.ResourceStatus(in.Status)
	return nil
}

// Convert_v1alpha2_StorageDevice_To_v1alpha3_StorageDevice is an autogenerated conversion function.
func Convert_v1alpha2_StorageDevice_To_v1alpha3_StorageDevice(in *StorageDevice, out *v1alpha3.StorageDevice, s conversion.Scope) error {
	return autoConvert_v1alpha2_StorageDevice_To_v1alpha3_StorageDevice(in, out, s)
}

func autoConvert_v1alpha3_StorageDevice_To_v1alpha2_StorageDevice(in *v1alpha3.StorageDevice, out *StorageDevice, s conversion.Scope) error {
	out.Model = in.Model
	out.SerialNumber = in.SerialNumber
	out.FirmwareVersion = in.FirmwareVersion
	out.Slot = in.Slot
	out.Capacity = in.Capacity
	out.WearLevel = (*int64)(unsafe.Pointer(in.WearLevel))
	out.Status = ResourceStatus(in.Status)
	return nil
}

// Convert_v1alpha3_StorageDevice_To_v1alpha2_StorageDevice is an autogenerated conversion function.
func Convert_v1alpha3_StorageDevice_To_v1alpha2_StorageDevice(in *v1alpha3.StorageDevice, out *StorageDevice, s conversion.Scope) error {
	return autoConvert_v1alpha3_StorageDevice_To_v1alpha2_StorageDevice(in, out, s)
}

// autoConvert_v1alpha2_StorageList_To_v1alpha3_StorageList converts the list; the Items slice is
// unsafe-cast, which assumes the two Storage element types share an identical memory layout.
func autoConvert_v1alpha2_StorageList_To_v1alpha3_StorageList(in *StorageList, out *v1alpha3.StorageList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	out.Items = *(*[]v1alpha3.Storage)(unsafe.Pointer(&in.Items))
	return nil
}

// Convert_v1alpha2_StorageList_To_v1alpha3_StorageList is an autogenerated conversion function.
func Convert_v1alpha2_StorageList_To_v1alpha3_StorageList(in *StorageList, out *v1alpha3.StorageList, s conversion.Scope) error {
	return autoConvert_v1alpha2_StorageList_To_v1alpha3_StorageList(in, out, s)
}

func autoConvert_v1alpha3_StorageList_To_v1alpha2_StorageList(in *v1alpha3.StorageList, out *StorageList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	out.Items = *(*[]Storage)(unsafe.Pointer(&in.Items))
	return nil
}

// Convert_v1alpha3_StorageList_To_v1alpha2_StorageList is an autogenerated conversion function.
func Convert_v1alpha3_StorageList_To_v1alpha2_StorageList(in *v1alpha3.StorageList, out *StorageList, s conversion.Scope) error {
	return autoConvert_v1alpha3_StorageList_To_v1alpha2_StorageList(in, out, s)
}

// autoConvert_v1alpha2_StorageSpec_To_v1alpha3_StorageSpec converts the spec; State is a
// string-typed enum converted with a plain cast.
func autoConvert_v1alpha2_StorageSpec_To_v1alpha3_StorageSpec(in *StorageSpec, out *v1alpha3.StorageSpec, s conversion.Scope) error {
	out.State = v1alpha3.ResourceState(in.State)
	out.Mode = in.Mode
	return nil
}

// Convert_v1alpha2_StorageSpec_To_v1alpha3_StorageSpec is an autogenerated conversion function.
func Convert_v1alpha2_StorageSpec_To_v1alpha3_StorageSpec(in *StorageSpec, out *v1alpha3.StorageSpec, s conversion.Scope) error {
	return autoConvert_v1alpha2_StorageSpec_To_v1alpha3_StorageSpec(in, out, s)
}

func autoConvert_v1alpha3_StorageSpec_To_v1alpha2_StorageSpec(in *v1alpha3.StorageSpec, out *StorageSpec, s conversion.Scope) error {
	out.State = ResourceState(in.State)
	out.Mode = in.Mode
	return nil
}

// Convert_v1alpha3_StorageSpec_To_v1alpha2_StorageSpec is an autogenerated conversion function.
func Convert_v1alpha3_StorageSpec_To_v1alpha2_StorageSpec(in *v1alpha3.StorageSpec, out *StorageSpec, s conversion.Scope) error {
	return autoConvert_v1alpha3_StorageSpec_To_v1alpha2_StorageSpec(in, out, s)
}

// autoConvert_v1alpha2_StorageStatus_To_v1alpha3_StorageStatus converts the status; Devices is
// unsafe-cast (identical-layout assumption) while Access uses its explicit conversion function.
func autoConvert_v1alpha2_StorageStatus_To_v1alpha3_StorageStatus(in *StorageStatus, out *v1alpha3.StorageStatus, s conversion.Scope) error {
	out.Type = v1alpha3.StorageType(in.Type)
	out.Devices = *(*[]v1alpha3.StorageDevice)(unsafe.Pointer(&in.Devices))
	if err := Convert_v1alpha2_StorageAccess_To_v1alpha3_StorageAccess(&in.Access, &out.Access, s); err != nil {
		return err
	}
	out.Capacity = in.Capacity
	out.Status = v1alpha3.ResourceStatus(in.Status)
	out.RebootRequired = in.RebootRequired
	out.Message = in.Message
	return nil
}

// Convert_v1alpha2_StorageStatus_To_v1alpha3_StorageStatus is an autogenerated conversion function.
func Convert_v1alpha2_StorageStatus_To_v1alpha3_StorageStatus(in *StorageStatus, out *v1alpha3.StorageStatus, s conversion.Scope) error {
	return autoConvert_v1alpha2_StorageStatus_To_v1alpha3_StorageStatus(in, out, s)
}

func autoConvert_v1alpha3_StorageStatus_To_v1alpha2_StorageStatus(in *v1alpha3.StorageStatus, out *StorageStatus, s conversion.Scope) error {
	out.Type = StorageType(in.Type)
	out.Devices = *(*[]StorageDevice)(unsafe.Pointer(&in.Devices))
	if err := Convert_v1alpha3_StorageAccess_To_v1alpha2_StorageAccess(&in.Access, &out.Access, s); err != nil {
		return err
	}
	out.Capacity = in.Capacity
	out.Status = ResourceStatus(in.Status)
	out.RebootRequired = in.RebootRequired
	out.Message = in.Message
	return nil
}

// Convert_v1alpha3_StorageStatus_To_v1alpha2_StorageStatus is an autogenerated conversion function.
func Convert_v1alpha3_StorageStatus_To_v1alpha2_StorageStatus(in *v1alpha3.StorageStatus, out *StorageStatus, s conversion.Scope) error {
	return autoConvert_v1alpha3_StorageStatus_To_v1alpha2_StorageStatus(in, out, s)
}

// autoConvert_v1alpha2_SystemConfiguration_To_v1alpha3_SystemConfiguration converts the top-level
// object: ObjectMeta is shared, Spec and Status delegate to their conversion functions.
func autoConvert_v1alpha2_SystemConfiguration_To_v1alpha3_SystemConfiguration(in *SystemConfiguration, out *v1alpha3.SystemConfiguration, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_v1alpha2_SystemConfigurationSpec_To_v1alpha3_SystemConfigurationSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := Convert_v1alpha2_SystemConfigurationStatus_To_v1alpha3_SystemConfigurationStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1alpha2_SystemConfiguration_To_v1alpha3_SystemConfiguration is an autogenerated conversion function.
func Convert_v1alpha2_SystemConfiguration_To_v1alpha3_SystemConfiguration(in *SystemConfiguration, out *v1alpha3.SystemConfiguration, s conversion.Scope) error {
	return autoConvert_v1alpha2_SystemConfiguration_To_v1alpha3_SystemConfiguration(in, out, s)
}

func autoConvert_v1alpha3_SystemConfiguration_To_v1alpha2_SystemConfiguration(in *v1alpha3.SystemConfiguration, out *SystemConfiguration, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_v1alpha3_SystemConfigurationSpec_To_v1alpha2_SystemConfigurationSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := Convert_v1alpha3_SystemConfigurationStatus_To_v1alpha2_SystemConfigurationStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1alpha3_SystemConfiguration_To_v1alpha2_SystemConfiguration is an autogenerated conversion function.
func Convert_v1alpha3_SystemConfiguration_To_v1alpha2_SystemConfiguration(in *v1alpha3.SystemConfiguration, out *SystemConfiguration, s conversion.Scope) error {
	return autoConvert_v1alpha3_SystemConfiguration_To_v1alpha2_SystemConfiguration(in, out, s)
}

// autoConvert_v1alpha2_SystemConfigurationComputeNodeReference_To_v1alpha3_SystemConfigurationComputeNodeReference
// copies the two plain fields of a compute-node reference.
func autoConvert_v1alpha2_SystemConfigurationComputeNodeReference_To_v1alpha3_SystemConfigurationComputeNodeReference(in *SystemConfigurationComputeNodeReference, out *v1alpha3.SystemConfigurationComputeNodeReference, s conversion.Scope) error {
	out.Name = in.Name
	out.Index = in.Index
	return nil
}

// Convert_v1alpha2_SystemConfigurationComputeNodeReference_To_v1alpha3_SystemConfigurationComputeNodeReference is an autogenerated conversion function.
+func Convert_v1alpha2_SystemConfigurationComputeNodeReference_To_v1alpha3_SystemConfigurationComputeNodeReference(in *SystemConfigurationComputeNodeReference, out *v1alpha3.SystemConfigurationComputeNodeReference, s conversion.Scope) error { + return autoConvert_v1alpha2_SystemConfigurationComputeNodeReference_To_v1alpha3_SystemConfigurationComputeNodeReference(in, out, s) +} + +func autoConvert_v1alpha3_SystemConfigurationComputeNodeReference_To_v1alpha2_SystemConfigurationComputeNodeReference(in *v1alpha3.SystemConfigurationComputeNodeReference, out *SystemConfigurationComputeNodeReference, s conversion.Scope) error { + out.Name = in.Name + out.Index = in.Index + return nil +} + +// Convert_v1alpha3_SystemConfigurationComputeNodeReference_To_v1alpha2_SystemConfigurationComputeNodeReference is an autogenerated conversion function. +func Convert_v1alpha3_SystemConfigurationComputeNodeReference_To_v1alpha2_SystemConfigurationComputeNodeReference(in *v1alpha3.SystemConfigurationComputeNodeReference, out *SystemConfigurationComputeNodeReference, s conversion.Scope) error { + return autoConvert_v1alpha3_SystemConfigurationComputeNodeReference_To_v1alpha2_SystemConfigurationComputeNodeReference(in, out, s) +} + +func autoConvert_v1alpha2_SystemConfigurationExternalComputeNode_To_v1alpha3_SystemConfigurationExternalComputeNode(in *SystemConfigurationExternalComputeNode, out *v1alpha3.SystemConfigurationExternalComputeNode, s conversion.Scope) error { + out.Name = in.Name + return nil +} + +// Convert_v1alpha2_SystemConfigurationExternalComputeNode_To_v1alpha3_SystemConfigurationExternalComputeNode is an autogenerated conversion function. 
+func Convert_v1alpha2_SystemConfigurationExternalComputeNode_To_v1alpha3_SystemConfigurationExternalComputeNode(in *SystemConfigurationExternalComputeNode, out *v1alpha3.SystemConfigurationExternalComputeNode, s conversion.Scope) error { + return autoConvert_v1alpha2_SystemConfigurationExternalComputeNode_To_v1alpha3_SystemConfigurationExternalComputeNode(in, out, s) +} + +func autoConvert_v1alpha3_SystemConfigurationExternalComputeNode_To_v1alpha2_SystemConfigurationExternalComputeNode(in *v1alpha3.SystemConfigurationExternalComputeNode, out *SystemConfigurationExternalComputeNode, s conversion.Scope) error { + out.Name = in.Name + return nil +} + +// Convert_v1alpha3_SystemConfigurationExternalComputeNode_To_v1alpha2_SystemConfigurationExternalComputeNode is an autogenerated conversion function. +func Convert_v1alpha3_SystemConfigurationExternalComputeNode_To_v1alpha2_SystemConfigurationExternalComputeNode(in *v1alpha3.SystemConfigurationExternalComputeNode, out *SystemConfigurationExternalComputeNode, s conversion.Scope) error { + return autoConvert_v1alpha3_SystemConfigurationExternalComputeNode_To_v1alpha2_SystemConfigurationExternalComputeNode(in, out, s) +} + +func autoConvert_v1alpha2_SystemConfigurationList_To_v1alpha3_SystemConfigurationList(in *SystemConfigurationList, out *v1alpha3.SystemConfigurationList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha3.SystemConfiguration)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha2_SystemConfigurationList_To_v1alpha3_SystemConfigurationList is an autogenerated conversion function. 
+func Convert_v1alpha2_SystemConfigurationList_To_v1alpha3_SystemConfigurationList(in *SystemConfigurationList, out *v1alpha3.SystemConfigurationList, s conversion.Scope) error { + return autoConvert_v1alpha2_SystemConfigurationList_To_v1alpha3_SystemConfigurationList(in, out, s) +} + +func autoConvert_v1alpha3_SystemConfigurationList_To_v1alpha2_SystemConfigurationList(in *v1alpha3.SystemConfigurationList, out *SystemConfigurationList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]SystemConfiguration)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha3_SystemConfigurationList_To_v1alpha2_SystemConfigurationList is an autogenerated conversion function. +func Convert_v1alpha3_SystemConfigurationList_To_v1alpha2_SystemConfigurationList(in *v1alpha3.SystemConfigurationList, out *SystemConfigurationList, s conversion.Scope) error { + return autoConvert_v1alpha3_SystemConfigurationList_To_v1alpha2_SystemConfigurationList(in, out, s) +} + +func autoConvert_v1alpha2_SystemConfigurationSpec_To_v1alpha3_SystemConfigurationSpec(in *SystemConfigurationSpec, out *v1alpha3.SystemConfigurationSpec, s conversion.Scope) error { + out.ExternalComputeNodes = *(*[]v1alpha3.SystemConfigurationExternalComputeNode)(unsafe.Pointer(&in.ExternalComputeNodes)) + out.StorageNodes = *(*[]v1alpha3.SystemConfigurationStorageNode)(unsafe.Pointer(&in.StorageNodes)) + out.Ports = *(*[]intstr.IntOrString)(unsafe.Pointer(&in.Ports)) + out.PortsCooldownInSeconds = in.PortsCooldownInSeconds + return nil +} + +// Convert_v1alpha2_SystemConfigurationSpec_To_v1alpha3_SystemConfigurationSpec is an autogenerated conversion function. 
+func Convert_v1alpha2_SystemConfigurationSpec_To_v1alpha3_SystemConfigurationSpec(in *SystemConfigurationSpec, out *v1alpha3.SystemConfigurationSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_SystemConfigurationSpec_To_v1alpha3_SystemConfigurationSpec(in, out, s) +} + +func autoConvert_v1alpha3_SystemConfigurationSpec_To_v1alpha2_SystemConfigurationSpec(in *v1alpha3.SystemConfigurationSpec, out *SystemConfigurationSpec, s conversion.Scope) error { + out.ExternalComputeNodes = *(*[]SystemConfigurationExternalComputeNode)(unsafe.Pointer(&in.ExternalComputeNodes)) + out.StorageNodes = *(*[]SystemConfigurationStorageNode)(unsafe.Pointer(&in.StorageNodes)) + out.Ports = *(*[]intstr.IntOrString)(unsafe.Pointer(&in.Ports)) + out.PortsCooldownInSeconds = in.PortsCooldownInSeconds + return nil +} + +// Convert_v1alpha3_SystemConfigurationSpec_To_v1alpha2_SystemConfigurationSpec is an autogenerated conversion function. +func Convert_v1alpha3_SystemConfigurationSpec_To_v1alpha2_SystemConfigurationSpec(in *v1alpha3.SystemConfigurationSpec, out *SystemConfigurationSpec, s conversion.Scope) error { + return autoConvert_v1alpha3_SystemConfigurationSpec_To_v1alpha2_SystemConfigurationSpec(in, out, s) +} + +func autoConvert_v1alpha2_SystemConfigurationStatus_To_v1alpha3_SystemConfigurationStatus(in *SystemConfigurationStatus, out *v1alpha3.SystemConfigurationStatus, s conversion.Scope) error { + out.Ready = in.Ready + if err := Convert_v1alpha2_ResourceError_To_v1alpha3_ResourceError(&in.ResourceError, &out.ResourceError, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha2_SystemConfigurationStatus_To_v1alpha3_SystemConfigurationStatus is an autogenerated conversion function. 
+func Convert_v1alpha2_SystemConfigurationStatus_To_v1alpha3_SystemConfigurationStatus(in *SystemConfigurationStatus, out *v1alpha3.SystemConfigurationStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_SystemConfigurationStatus_To_v1alpha3_SystemConfigurationStatus(in, out, s) +} + +func autoConvert_v1alpha3_SystemConfigurationStatus_To_v1alpha2_SystemConfigurationStatus(in *v1alpha3.SystemConfigurationStatus, out *SystemConfigurationStatus, s conversion.Scope) error { + out.Ready = in.Ready + if err := Convert_v1alpha3_ResourceError_To_v1alpha2_ResourceError(&in.ResourceError, &out.ResourceError, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha3_SystemConfigurationStatus_To_v1alpha2_SystemConfigurationStatus is an autogenerated conversion function. +func Convert_v1alpha3_SystemConfigurationStatus_To_v1alpha2_SystemConfigurationStatus(in *v1alpha3.SystemConfigurationStatus, out *SystemConfigurationStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_SystemConfigurationStatus_To_v1alpha2_SystemConfigurationStatus(in, out, s) +} + +func autoConvert_v1alpha2_SystemConfigurationStorageNode_To_v1alpha3_SystemConfigurationStorageNode(in *SystemConfigurationStorageNode, out *v1alpha3.SystemConfigurationStorageNode, s conversion.Scope) error { + out.Type = in.Type + out.Name = in.Name + out.ComputesAccess = *(*[]v1alpha3.SystemConfigurationComputeNodeReference)(unsafe.Pointer(&in.ComputesAccess)) + return nil +} + +// Convert_v1alpha2_SystemConfigurationStorageNode_To_v1alpha3_SystemConfigurationStorageNode is an autogenerated conversion function. 
+func Convert_v1alpha2_SystemConfigurationStorageNode_To_v1alpha3_SystemConfigurationStorageNode(in *SystemConfigurationStorageNode, out *v1alpha3.SystemConfigurationStorageNode, s conversion.Scope) error { + return autoConvert_v1alpha2_SystemConfigurationStorageNode_To_v1alpha3_SystemConfigurationStorageNode(in, out, s) +} + +func autoConvert_v1alpha3_SystemConfigurationStorageNode_To_v1alpha2_SystemConfigurationStorageNode(in *v1alpha3.SystemConfigurationStorageNode, out *SystemConfigurationStorageNode, s conversion.Scope) error { + out.Type = in.Type + out.Name = in.Name + out.ComputesAccess = *(*[]SystemConfigurationComputeNodeReference)(unsafe.Pointer(&in.ComputesAccess)) + return nil +} + +// Convert_v1alpha3_SystemConfigurationStorageNode_To_v1alpha2_SystemConfigurationStorageNode is an autogenerated conversion function. +func Convert_v1alpha3_SystemConfigurationStorageNode_To_v1alpha2_SystemConfigurationStorageNode(in *v1alpha3.SystemConfigurationStorageNode, out *SystemConfigurationStorageNode, s conversion.Scope) error { + return autoConvert_v1alpha3_SystemConfigurationStorageNode_To_v1alpha2_SystemConfigurationStorageNode(in, out, s) +} + +func autoConvert_v1alpha2_Workflow_To_v1alpha3_Workflow(in *Workflow, out *v1alpha3.Workflow, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha2_WorkflowSpec_To_v1alpha3_WorkflowSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha2_WorkflowStatus_To_v1alpha3_WorkflowStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha2_Workflow_To_v1alpha3_Workflow is an autogenerated conversion function. 
+func Convert_v1alpha2_Workflow_To_v1alpha3_Workflow(in *Workflow, out *v1alpha3.Workflow, s conversion.Scope) error { + return autoConvert_v1alpha2_Workflow_To_v1alpha3_Workflow(in, out, s) +} + +func autoConvert_v1alpha3_Workflow_To_v1alpha2_Workflow(in *v1alpha3.Workflow, out *Workflow, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha3_WorkflowSpec_To_v1alpha2_WorkflowSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha3_WorkflowStatus_To_v1alpha2_WorkflowStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha3_Workflow_To_v1alpha2_Workflow is an autogenerated conversion function. +func Convert_v1alpha3_Workflow_To_v1alpha2_Workflow(in *v1alpha3.Workflow, out *Workflow, s conversion.Scope) error { + return autoConvert_v1alpha3_Workflow_To_v1alpha2_Workflow(in, out, s) +} + +func autoConvert_v1alpha2_WorkflowDriverStatus_To_v1alpha3_WorkflowDriverStatus(in *WorkflowDriverStatus, out *v1alpha3.WorkflowDriverStatus, s conversion.Scope) error { + out.DriverID = in.DriverID + out.TaskID = in.TaskID + out.DWDIndex = in.DWDIndex + out.WatchState = v1alpha3.WorkflowState(in.WatchState) + out.LastHB = in.LastHB + out.Completed = in.Completed + out.Status = in.Status + out.Message = in.Message + out.Error = in.Error + out.CompleteTime = (*metav1.MicroTime)(unsafe.Pointer(in.CompleteTime)) + return nil +} + +// Convert_v1alpha2_WorkflowDriverStatus_To_v1alpha3_WorkflowDriverStatus is an autogenerated conversion function. 
+func Convert_v1alpha2_WorkflowDriverStatus_To_v1alpha3_WorkflowDriverStatus(in *WorkflowDriverStatus, out *v1alpha3.WorkflowDriverStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_WorkflowDriverStatus_To_v1alpha3_WorkflowDriverStatus(in, out, s) +} + +func autoConvert_v1alpha3_WorkflowDriverStatus_To_v1alpha2_WorkflowDriverStatus(in *v1alpha3.WorkflowDriverStatus, out *WorkflowDriverStatus, s conversion.Scope) error { + out.DriverID = in.DriverID + out.TaskID = in.TaskID + out.DWDIndex = in.DWDIndex + out.WatchState = WorkflowState(in.WatchState) + out.LastHB = in.LastHB + out.Completed = in.Completed + out.Status = in.Status + out.Message = in.Message + out.Error = in.Error + out.CompleteTime = (*metav1.MicroTime)(unsafe.Pointer(in.CompleteTime)) + return nil +} + +// Convert_v1alpha3_WorkflowDriverStatus_To_v1alpha2_WorkflowDriverStatus is an autogenerated conversion function. +func Convert_v1alpha3_WorkflowDriverStatus_To_v1alpha2_WorkflowDriverStatus(in *v1alpha3.WorkflowDriverStatus, out *WorkflowDriverStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_WorkflowDriverStatus_To_v1alpha2_WorkflowDriverStatus(in, out, s) +} + +func autoConvert_v1alpha2_WorkflowList_To_v1alpha3_WorkflowList(in *WorkflowList, out *v1alpha3.WorkflowList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha3.Workflow)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha2_WorkflowList_To_v1alpha3_WorkflowList is an autogenerated conversion function. 
+func Convert_v1alpha2_WorkflowList_To_v1alpha3_WorkflowList(in *WorkflowList, out *v1alpha3.WorkflowList, s conversion.Scope) error { + return autoConvert_v1alpha2_WorkflowList_To_v1alpha3_WorkflowList(in, out, s) +} + +func autoConvert_v1alpha3_WorkflowList_To_v1alpha2_WorkflowList(in *v1alpha3.WorkflowList, out *WorkflowList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]Workflow)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha3_WorkflowList_To_v1alpha2_WorkflowList is an autogenerated conversion function. +func Convert_v1alpha3_WorkflowList_To_v1alpha2_WorkflowList(in *v1alpha3.WorkflowList, out *WorkflowList, s conversion.Scope) error { + return autoConvert_v1alpha3_WorkflowList_To_v1alpha2_WorkflowList(in, out, s) +} + +func autoConvert_v1alpha2_WorkflowSpec_To_v1alpha3_WorkflowSpec(in *WorkflowSpec, out *v1alpha3.WorkflowSpec, s conversion.Scope) error { + out.DesiredState = v1alpha3.WorkflowState(in.DesiredState) + out.WLMID = in.WLMID + out.JobID = in.JobID + out.UserID = in.UserID + out.GroupID = in.GroupID + out.Hurry = in.Hurry + out.DWDirectives = *(*[]string)(unsafe.Pointer(&in.DWDirectives)) + return nil +} + +// Convert_v1alpha2_WorkflowSpec_To_v1alpha3_WorkflowSpec is an autogenerated conversion function. 
+func Convert_v1alpha2_WorkflowSpec_To_v1alpha3_WorkflowSpec(in *WorkflowSpec, out *v1alpha3.WorkflowSpec, s conversion.Scope) error { + return autoConvert_v1alpha2_WorkflowSpec_To_v1alpha3_WorkflowSpec(in, out, s) +} + +func autoConvert_v1alpha3_WorkflowSpec_To_v1alpha2_WorkflowSpec(in *v1alpha3.WorkflowSpec, out *WorkflowSpec, s conversion.Scope) error { + out.DesiredState = WorkflowState(in.DesiredState) + out.WLMID = in.WLMID + out.JobID = in.JobID + out.UserID = in.UserID + out.GroupID = in.GroupID + out.Hurry = in.Hurry + out.DWDirectives = *(*[]string)(unsafe.Pointer(&in.DWDirectives)) + return nil +} + +// Convert_v1alpha3_WorkflowSpec_To_v1alpha2_WorkflowSpec is an autogenerated conversion function. +func Convert_v1alpha3_WorkflowSpec_To_v1alpha2_WorkflowSpec(in *v1alpha3.WorkflowSpec, out *WorkflowSpec, s conversion.Scope) error { + return autoConvert_v1alpha3_WorkflowSpec_To_v1alpha2_WorkflowSpec(in, out, s) +} + +func autoConvert_v1alpha2_WorkflowStatus_To_v1alpha3_WorkflowStatus(in *WorkflowStatus, out *v1alpha3.WorkflowStatus, s conversion.Scope) error { + out.State = v1alpha3.WorkflowState(in.State) + out.Ready = in.Ready + out.Status = in.Status + out.Message = in.Message + out.Env = *(*map[string]string)(unsafe.Pointer(&in.Env)) + out.Drivers = *(*[]v1alpha3.WorkflowDriverStatus)(unsafe.Pointer(&in.Drivers)) + out.DirectiveBreakdowns = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.DirectiveBreakdowns)) + out.Computes = in.Computes + out.DesiredStateChange = (*metav1.MicroTime)(unsafe.Pointer(in.DesiredStateChange)) + out.ReadyChange = (*metav1.MicroTime)(unsafe.Pointer(in.ReadyChange)) + out.ElapsedTimeLastState = in.ElapsedTimeLastState + return nil +} + +// Convert_v1alpha2_WorkflowStatus_To_v1alpha3_WorkflowStatus is an autogenerated conversion function. 
+func Convert_v1alpha2_WorkflowStatus_To_v1alpha3_WorkflowStatus(in *WorkflowStatus, out *v1alpha3.WorkflowStatus, s conversion.Scope) error { + return autoConvert_v1alpha2_WorkflowStatus_To_v1alpha3_WorkflowStatus(in, out, s) +} + +func autoConvert_v1alpha3_WorkflowStatus_To_v1alpha2_WorkflowStatus(in *v1alpha3.WorkflowStatus, out *WorkflowStatus, s conversion.Scope) error { + out.State = WorkflowState(in.State) + out.Ready = in.Ready + out.Status = in.Status + out.Message = in.Message + out.Env = *(*map[string]string)(unsafe.Pointer(&in.Env)) + out.Drivers = *(*[]WorkflowDriverStatus)(unsafe.Pointer(&in.Drivers)) + out.DirectiveBreakdowns = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.DirectiveBreakdowns)) + out.Computes = in.Computes + out.DesiredStateChange = (*metav1.MicroTime)(unsafe.Pointer(in.DesiredStateChange)) + out.ReadyChange = (*metav1.MicroTime)(unsafe.Pointer(in.ReadyChange)) + out.ElapsedTimeLastState = in.ElapsedTimeLastState + return nil +} + +// Convert_v1alpha3_WorkflowStatus_To_v1alpha2_WorkflowStatus is an autogenerated conversion function. 
+func Convert_v1alpha3_WorkflowStatus_To_v1alpha2_WorkflowStatus(in *v1alpha3.WorkflowStatus, out *WorkflowStatus, s conversion.Scope) error { + return autoConvert_v1alpha3_WorkflowStatus_To_v1alpha2_WorkflowStatus(in, out, s) +} diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/zz_generated.deepcopy.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/zz_generated.deepcopy.go index 97071e315..b98aeed92 100644 --- a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/zz_generated.deepcopy.go +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/zz_generated.deepcopy.go @@ -26,7 +26,7 @@ package v1alpha2 import ( "github.com/DataWorkflowServices/dws/utils/dwdparse" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" + runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" ) diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/clientmount_types.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/clientmount_types.go new file mode 100644 index 000000000..acbb83cd3 --- /dev/null +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/clientmount_types.go @@ -0,0 +1,235 @@ +/* + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v1alpha3 + +import ( + "github.com/DataWorkflowServices/dws/utils/updater" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ClientMountDeviceLustre defines the lustre device information for mounting +type ClientMountDeviceLustre struct { + // Lustre fsname + FileSystemName string `json:"fileSystemName"` + + // List of mgsAddresses of the form [address]@[lnet] + MgsAddresses string `json:"mgsAddresses"` +} + +// ClientMountNVMeDesc uniquely describes an NVMe namespace +type ClientMountNVMeDesc struct { + // Serial number of the base NVMe device + DeviceSerial string `json:"deviceSerial"` + + // Id of the Namespace on the NVMe device (e.g., "2") + NamespaceID string `json:"namespaceID"` + + // Globally unique namespace ID + NamespaceGUID string `json:"namespaceGUID"` +} + +// ClientMountLVMDeviceType specifies the go type for LVMDeviceType +type ClientMountLVMDeviceType string + +const ( + // ClientMountLVMDeviceTypeNVMe specifies the NVMe constant device type + ClientMountLVMDeviceTypeNVMe ClientMountLVMDeviceType = "nvme" +) + +// ClientMountDeviceLVM defines an LVM device by the VG/LV pair and optionally +// the drives that are the PVs. 
+type ClientMountDeviceLVM struct { + // Type of underlying block deices used for the PVs + // +kubebuilder:validation:Enum=nvme + DeviceType ClientMountLVMDeviceType `json:"deviceType"` + + // List of NVMe namespaces that are used by the VG + NVMeInfo []ClientMountNVMeDesc `json:"nvmeInfo,omitempty"` + + // LVM volume group name + VolumeGroup string `json:"volumeGroup,omitempty"` + + // LVM logical volume name + LogicalVolume string `json:"logicalVolume,omitempty"` +} + +// ClientMountDeviceReference is an reference to a different Kubernetes object +// where device information can be found +type ClientMountDeviceReference struct { + // Object reference for the device information + ObjectReference corev1.ObjectReference `json:"objectReference"` + + // Optional private data for the driver + Data int `json:"data,omitempty"` +} + +// ClientMountDeviceType specifies the go type for device type +type ClientMountDeviceType string + +const ( + // ClientMountDeviceTypeLustre is used to define the device as a Lustre file system + ClientMountDeviceTypeLustre ClientMountDeviceType = "lustre" + + // ClientMountDeviceTypeLVM is used to define the device as a LVM logical volume + ClientMountDeviceTypeLVM ClientMountDeviceType = "lvm" + + // ClientMountDeviceTypeReference is used when the device information is described in + // a separate Kubernetes resource. The clientmountd (or another controller doing the mounts) + // must know how to interpret the resource to extract the device information. 
+ ClientMountDeviceTypeReference ClientMountDeviceType = "reference" +) + +// ClientMountDevice defines the device to mount +type ClientMountDevice struct { + // +kubebuilder:validation:Enum=lustre;lvm;reference + Type ClientMountDeviceType `json:"type"` + + // Lustre specific device information + Lustre *ClientMountDeviceLustre `json:"lustre,omitempty"` + + // LVM logical volume specific device information + LVM *ClientMountDeviceLVM `json:"lvm,omitempty"` + + DeviceReference *ClientMountDeviceReference `json:"deviceReference,omitempty"` +} + +// ClientMountInfo defines a single mount +type ClientMountInfo struct { + // Client path for mount target + MountPath string `json:"mountPath"` + + // UserID to set for the mount + UserID uint32 `json:"userID,omitempty"` + + // GroupID to set for the mount + GroupID uint32 `json:"groupID,omitempty"` + + // SetPermissions will set UserID and GroupID on the mount if true + SetPermissions bool `json:"setPermissions"` + + // Options for the file system mount + Options string `json:"options"` + + // Description of the device to mount + Device ClientMountDevice `json:"device"` + + // mount type + // +kubebuilder:validation:Enum=lustre;xfs;gfs2;none + Type string `json:"type"` + + // TargetType determines whether the mount target is a file or a directory + // +kubebuilder:validation:Enum=file;directory + TargetType string `json:"targetType"` + + // Compute is the name of the compute node which shares this mount if present. Empty if not shared. 
+ Compute string `json:"compute,omitempty"` +} + +// ClientMountState specifies the go type for MountState +type ClientMountState string + +// ClientMountState string constants +const ( + ClientMountStateMounted ClientMountState = "mounted" + ClientMountStateUnmounted ClientMountState = "unmounted" +) + +// ClientMountSpec defines the desired state of ClientMount +type ClientMountSpec struct { + // Name of the client node that is targeted by this mount + Node string `json:"node"` + + // Desired state of the mount point + // +kubebuilder:validation:Enum=mounted;unmounted + DesiredState ClientMountState `json:"desiredState"` + + // List of mounts to create on this client + // +kubebuilder:validation:MinItems=1 + Mounts []ClientMountInfo `json:"mounts"` +} + +// ClientMountInfoStatus is the status for a single mount point +type ClientMountInfoStatus struct { + // Current state + // +kubebuilder:validation:Enum=mounted;unmounted + State ClientMountState `json:"state"` + + // Ready indicates whether status.state has been achieved + Ready bool `json:"ready"` +} + +// ClientMountStatus defines the observed state of ClientMount +type ClientMountStatus struct { + // List of mount statuses + Mounts []ClientMountInfoStatus `json:"mounts"` + + // Rollup of each mounts ready status + AllReady bool `json:"allReady"` + + // Error information + ResourceError `json:",inline"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:storageversion +//+kubebuilder:subresource:status +//+kubebuilder:printcolumn:name="DESIREDSTATE",type="string",JSONPath=".spec.desiredState",description="The desired state" +//+kubebuilder:printcolumn:name="READY",type="boolean",JSONPath=".status.allReady",description="True if desired state is achieved" +//+kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" +//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" + +// ClientMount is the Schema for the clientmounts API +type ClientMount 
struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClientMountSpec `json:"spec,omitempty"` + Status ClientMountStatus `json:"status,omitempty"` +} + +func (c *ClientMount) GetStatus() updater.Status[*ClientMountStatus] { + return &c.Status +} + +//+kubebuilder:object:root=true + +// ClientMountList contains a list of ClientMount +type ClientMountList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClientMount `json:"items"` +} + +// GetObjectList returns a list of Client references. +func (c *ClientMountList) GetObjectList() []client.Object { + objectList := []client.Object{} + + for i := range c.Items { + objectList = append(objectList, &c.Items[i]) + } + + return objectList +} + +func init() { + SchemeBuilder.Register(&ClientMount{}, &ClientMountList{}) +} diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/clientmount_webhook.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/clientmount_webhook.go similarity index 98% rename from vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/clientmount_webhook.go rename to vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/clientmount_webhook.go index c95c432a5..1524896b5 100644 --- a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/clientmount_webhook.go +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/clientmount_webhook.go @@ -17,7 +17,7 @@ * limitations under the License. 
*/ -package v1alpha2 +package v1alpha3 import ( ctrl "sigs.k8s.io/controller-runtime" diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/computes_types.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/computes_types.go new file mode 100644 index 000000000..649ab251a --- /dev/null +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/computes_types.go @@ -0,0 +1,65 @@ +/* + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v1alpha3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ComputesData defines the compute nodes that are assigned to the workflow +type ComputesData struct { + // Name is the identifer name for the compute node + Name string `json:"name"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:storageversion + +// Computes is the Schema for the computes API +type Computes struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Data []ComputesData `json:"data,omitempty"` +} + +//+kubebuilder:object:root=true + +// ComputesList contains a list of Computes +type ComputesList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Computes `json:"items"` +} + +func (c *ComputesList) GetObjectList() []client.Object { + objectList := []client.Object{} + + for i := range c.Items { + objectList = append(objectList, &c.Items[i]) + } + + return objectList +} + +func init() { + SchemeBuilder.Register(&Computes{}, &ComputesList{}) +} diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/computes_webhook.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/computes_webhook.go similarity index 98% rename from vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/computes_webhook.go rename to vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/computes_webhook.go index 1d78a40de..0e01b8df8 100644 --- a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/computes_webhook.go +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/computes_webhook.go @@ -17,7 +17,7 @@ * limitations under the License. 
*/ -package v1alpha2 +package v1alpha3 import ( ctrl "sigs.k8s.io/controller-runtime" diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/conversion.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/conversion.go new file mode 100644 index 000000000..6c5e3af9d --- /dev/null +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/conversion.go @@ -0,0 +1,41 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha3 + +func (*ClientMount) Hub() {} +func (*Computes) Hub() {} +func (*DWDirectiveRule) Hub() {} +func (*DirectiveBreakdown) Hub() {} +func (*PersistentStorageInstance) Hub() {} +func (*Servers) Hub() {} +func (*Storage) Hub() {} +func (*SystemConfiguration) Hub() {} +func (*Workflow) Hub() {} + +// The conversion-verifier tool wants these...though they're never used. 
+func (*ClientMountList) Hub() {} +func (*ComputesList) Hub() {} +func (*DWDirectiveRuleList) Hub() {} +func (*DirectiveBreakdownList) Hub() {} +func (*PersistentStorageInstanceList) Hub() {} +func (*ServersList) Hub() {} +func (*StorageList) Hub() {} +func (*SystemConfigurationList) Hub() {} +func (*WorkflowList) Hub() {} diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/directivebreakdown_types.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/directivebreakdown_types.go new file mode 100644 index 000000000..392858e64 --- /dev/null +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/directivebreakdown_types.go @@ -0,0 +1,233 @@ +/* + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha3 + +import ( + "github.com/DataWorkflowServices/dws/utils/updater" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
+ +type AllocationStrategy string + +const ( + AllocatePerCompute AllocationStrategy = "AllocatePerCompute" + AllocateAcrossServers AllocationStrategy = "AllocateAcrossServers" + AllocateSingleServer AllocationStrategy = "AllocateSingleServer" +) + +const ( + // DirectiveLifetimeJob specifies storage allocated for the lifetime of the job + DirectiveLifetimeJob = "job" + // DirectiveLifetimePersistent specifies storage allocated an indefinite lifetime usually longer than a job + DirectiveLifetimePersistent = "persistent" +) + +// AllocationSetColocationConstraint specifies how to colocate storage resources. +// A colocation constraint specifies how the location(s) of an allocation set should be +// selected with relation to other allocation sets. Locations for allocation sets with the +// same colocation key should be picked according to the colocation type. +type AllocationSetColocationConstraint struct { + // Type of colocation constraint + // +kubebuilder:validation:Enum=exclusive + Type string `json:"type"` + + // Key shared by all the allocation sets that have their location constrained + // in relation to each other. + Key string `json:"key"` +} + +// AllocationSetConstraints specifies the constraints required for colocation of Storage +// resources +type AllocationSetConstraints struct { + // Labels is a list of labels is used to filter the Storage resources + Labels []string `json:"labels,omitempty"` + + // Scale is a hint for the number of allocations to make based on a 1-10 value + // +kubebuilder:validation:Minimum:=1 + // +kubebuilder:validation:Maximum:=10 + Scale int `json:"scale,omitempty"` + + // Count is the number of the allocations to make + // +kubebuilder:validation:Minimum:=1 + Count int `json:"count,omitempty"` + + // Colocation is a list of constraints for which Storage resources + // to pick in relation to Storage resources for other allocation sets. 
+ Colocation []AllocationSetColocationConstraint `json:"colocation,omitempty"` +} + +// StorageAllocationSet defines the details of an allocation set +type StorageAllocationSet struct { + // AllocationStrategy specifies the way to determine the number of allocations of the MinimumCapacity required for this AllocationSet. + // +kubebuilder:validation:Enum=AllocatePerCompute;AllocateAcrossServers;AllocateSingleServer + AllocationStrategy AllocationStrategy `json:"allocationStrategy"` + + // MinimumCapacity is the minumum number of bytes required to meet the needs of the filesystem that + // will use the storage. + // +kubebuilder:validation:Minimum:=1 + MinimumCapacity int64 `json:"minimumCapacity"` + + // Label is an identifier used to communicate from the DWS interface to internal interfaces + // the filesystem use of this AllocationSet. + // +kubebuilder:validation:Enum=raw;xfs;gfs2;mgt;mdt;mgtmdt;ost; + Label string `json:"label"` + + // Constraint is an additional requirement pertaining to the suitability of Storage resources that may be used + // for this AllocationSet + Constraints AllocationSetConstraints `json:"constraints,omitempty"` +} + +const ( + StorageLifetimePersistent = "persistent" + StorageLifetimeJob = "job" +) + +// StorageBreakdown describes the storage requirements of a directive +type StorageBreakdown struct { + // Lifetime is the duration of the allocation + // +kubebuilder:validation:Enum=job;persistent + Lifetime string `json:"lifetime"` + + // Reference is an ObjectReference to another resource + Reference corev1.ObjectReference `json:"reference,omitempty"` + + // AllocationSets lists the allocations required to fulfill the #DW Directive + AllocationSets []StorageAllocationSet `json:"allocationSets,omitempty"` +} + +type ComputeLocationType string + +const ( + ComputeLocationNetwork ComputeLocationType = "network" + ComputeLocationPhysical ComputeLocationType = "physical" +) + +type ComputeLocationPriority string + +const ( + 
ComputeLocationPriorityMandatory ComputeLocationPriority = "mandatory" + ComputeLocationPriorityBestEffort ComputeLocationPriority = "bestEffort" +) + +type ComputeLocationAccess struct { + // Type is the relationship between the compute nodes and the resource in the Reference + // +kubebuilder:validation:Enum=physical;network + Type ComputeLocationType `json:"type"` + + // Priority specifies whether the location constraint is mandatory or best effort + // +kubebuilder:validation:Enum=mandatory;bestEffort + Priority ComputeLocationPriority `json:"priority"` +} + +// ComputeLocationConstraint describes a constraints on which compute nodes can be used with +// a directive based on their location +type ComputeLocationConstraint struct { + Access []ComputeLocationAccess `json:"access"` + + // Reference is an object reference to a resource that contains the location information + Reference corev1.ObjectReference `json:"reference"` +} + +// ComputeConstraints describes the constraints to use when picking compute nodes +type ComputeConstraints struct { + // Location is a list of location constraints + Location []ComputeLocationConstraint `json:"location,omitempty"` +} + +// ComputeBreakdown describes the compute requirements of a directive +type ComputeBreakdown struct { + // Constraints to use when picking compute nodes + Constraints ComputeConstraints `json:"constraints,omitempty"` +} + +// DirectiveBreakdownSpec defines the directive string to breakdown +type DirectiveBreakdownSpec struct { + // Directive is a copy of the #DW for this breakdown + Directive string `json:"directive"` + + // User ID of the user associated with the job + UserID uint32 `json:"userID"` +} + +// DirectiveBreakdownStatus defines the storage information WLM needs to select NNF Nodes and request storage from the selected nodes +type DirectiveBreakdownStatus struct { + // Storage is the storage breakdown for the directive + Storage *StorageBreakdown `json:"storage,omitempty"` + + // Compute is 
the compute breakdown for the directive + Compute *ComputeBreakdown `json:"compute,omitempty"` + + // Ready indicates whether AllocationSets have been generated (true) or not (false) + Ready bool `json:"ready"` + + // RequiredDeamons tells the WLM about any driver-specific daemons it must enable for the job; it is assumed that the WLM knows about the driver-specific daemons and that if the users are specifying these then the WLM knows how to start them + RequiredDaemons []string `json:"requiredDaemons,omitempty"` + + // Error information + ResourceError `json:",inline"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:storageversion +//+kubebuilder:subresource:status +//+kubebuilder:printcolumn:name="READY",type="boolean",JSONPath=".status.ready",description="True if allocation sets have been generated" +//+kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" +//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" + +// DirectiveBreakdown is the Schema for the directivebreakdown API +type DirectiveBreakdown struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DirectiveBreakdownSpec `json:"spec,omitempty"` + Status DirectiveBreakdownStatus `json:"status,omitempty"` +} + +func (db *DirectiveBreakdown) GetStatus() updater.Status[*DirectiveBreakdownStatus] { + return &db.Status +} + +//+kubebuilder:object:root=true + +// DirectiveBreakdownList contains a list of DirectiveBreakdown +type DirectiveBreakdownList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DirectiveBreakdown `json:"items"` +} + +func (d *DirectiveBreakdownList) GetObjectList() []client.Object { + objectList := []client.Object{} + + for i := range d.Items { + objectList = append(objectList, &d.Items[i]) + } + + return objectList +} + +func init() { + SchemeBuilder.Register(&DirectiveBreakdown{}, &DirectiveBreakdownList{}) +} diff --git 
a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/directivebreakdown_webhook.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/directivebreakdown_webhook.go similarity index 98% rename from vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/directivebreakdown_webhook.go rename to vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/directivebreakdown_webhook.go index cc5ac5be2..7819828af 100644 --- a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/directivebreakdown_webhook.go +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/directivebreakdown_webhook.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha2 +package v1alpha3 import ( ctrl "sigs.k8s.io/controller-runtime" diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/dwdirectiverule_types.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/dwdirectiverule_types.go new file mode 100644 index 000000000..b578d3d80 --- /dev/null +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/dwdirectiverule_types.go @@ -0,0 +1,51 @@ +/* + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v1alpha3 + +import ( + "github.com/DataWorkflowServices/dws/utils/dwdparse" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +//+kubebuilder:object:root=true +//+kubebuilder:storageversion + +// DWDirectiveRule is the Schema for the DWDirective API +type DWDirectiveRule struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec []dwdparse.DWDirectiveRuleSpec `json:"spec,omitempty"` +} + +//+kubebuilder:object:root=true + +// DWDirectiveRuleList contains a list of DWDirective +type DWDirectiveRuleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DWDirectiveRule `json:"items"` +} + +func init() { + SchemeBuilder.Register(&DWDirectiveRule{}, &DWDirectiveRuleList{}) +} diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/dwdirectiverule_webhook.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/dwdirectiverule_webhook.go similarity index 98% rename from vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/dwdirectiverule_webhook.go rename to vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/dwdirectiverule_webhook.go index 82276e706..beebd56e6 100644 --- a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/dwdirectiverule_webhook.go +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/dwdirectiverule_webhook.go @@ -17,7 +17,7 @@ * limitations under the License. 
*/ -package v1alpha2 +package v1alpha3 import ( ctrl "sigs.k8s.io/controller-runtime" diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/groupversion_info.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/groupversion_info.go new file mode 100644 index 000000000..f130a16e6 --- /dev/null +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/groupversion_info.go @@ -0,0 +1,39 @@ +/* + * Copyright 2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package v1alpha3 contains API Schema definitions for the dataworkflowservices v1alpha3 API group +// +kubebuilder:object:generate=true +// +groupName=dataworkflowservices.github.io +package v1alpha3 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "dataworkflowservices.github.io", Version: "v1alpha3"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/owner_labels.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/owner_labels.go new file mode 100644 index 000000000..0a83a0147 --- /dev/null +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/owner_labels.go @@ -0,0 +1,290 @@ +/* + * Copyright 2022-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v1alpha3 + +import ( + "context" + "reflect" + + "golang.org/x/sync/errgroup" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +const ( + OwnerKindLabel = "dataworkflowservices.github.io/owner.kind" + OwnerNameLabel = "dataworkflowservices.github.io/owner.name" + OwnerNamespaceLabel = "dataworkflowservices.github.io/owner.namespace" + OwnerUidLabel = "dataworkflowservices.github.io/owner.uid" +) + +// +kubebuilder:object:generate=false +type ObjectList interface { + GetObjectList() []client.Object +} + +// AddOwnerLabels adds labels to a child resource that identifies the owner +func AddOwnerLabels(child metav1.Object, owner metav1.Object) { + labels := child.GetLabels() + if labels == nil { + labels = make(map[string]string) + } + + labels[OwnerKindLabel] = reflect.Indirect(reflect.ValueOf(owner)).Type().Name() + labels[OwnerNameLabel] = owner.GetName() + labels[OwnerNamespaceLabel] = owner.GetNamespace() + labels[OwnerUidLabel] = string(owner.GetUID()) + + child.SetLabels(labels) +} + +// MatchingOwner returns the MatchingLabels to match the owner labels +func MatchingOwner(owner metav1.Object) client.MatchingLabels { + return client.MatchingLabels(map[string]string{ + OwnerKindLabel: reflect.Indirect(reflect.ValueOf(owner)).Type().Name(), + OwnerNameLabel: owner.GetName(), + OwnerNamespaceLabel: owner.GetNamespace(), + OwnerUidLabel: string(owner.GetUID()), + }) +} + +func RemoveOwnerLabels(child metav1.Object) { + labels := child.GetLabels() + if labels == nil { + return + } + + delete(labels, OwnerKindLabel) + delete(labels, OwnerNameLabel) + delete(labels, OwnerNamespaceLabel) + delete(labels, OwnerUidLabel) + + child.SetLabels(labels) +} + +// AddWorkflowLabels adds labels to a resource to indicate which workflow it belongs to +func AddWorkflowLabels(child metav1.Object, workflow *Workflow) { + labels := 
child.GetLabels() + if labels == nil { + labels = make(map[string]string) + } + + labels[WorkflowNameLabel] = workflow.Name + labels[WorkflowNamespaceLabel] = workflow.Namespace + labels[WorkflowUidLabel] = string(workflow.GetUID()) + + child.SetLabels(labels) +} + +// MatchingWorkflow returns the MatchingLabels to match the workflow labels +func MatchingWorkflow(workflow *Workflow) client.MatchingLabels { + return client.MatchingLabels(map[string]string{ + WorkflowNameLabel: workflow.Name, + WorkflowNamespaceLabel: workflow.Namespace, + }) +} + +// AddPersistentStorageLabels adds labels to a resource to indicate which persistent storage instance it belongs to +func AddPersistentStorageLabels(child metav1.Object, persistentStorage *PersistentStorageInstance) { + labels := child.GetLabels() + if labels == nil { + labels = make(map[string]string) + } + + labels[PersistentStorageNameLabel] = persistentStorage.Name + labels[PersistentStorageNamespaceLabel] = persistentStorage.Namespace + + child.SetLabels(labels) +} + +// MatchingPersistentStorage returns the MatchingLabels to match the persistent storage labels +func MatchingPersistentStorage(persistentStorage *PersistentStorageInstance) client.MatchingLabels { + return client.MatchingLabels(map[string]string{ + PersistentStorageNameLabel: persistentStorage.Name, + PersistentStorageNamespaceLabel: persistentStorage.Namespace, + }) +} + +// InheritParentLabels adds all labels from a parent resource to a child resource, excluding +// the owner labels +func InheritParentLabels(child metav1.Object, owner metav1.Object) { + labels := child.GetLabels() + if labels == nil { + labels = make(map[string]string) + } + + for key, value := range owner.GetLabels() { + // don't inherit owner labels + if key == OwnerNameLabel || key == OwnerNamespaceLabel || key == OwnerKindLabel { + continue + } + + labels[key] = value + } + + child.SetLabels(labels) +} + +// DeleteStatus provides information about the status of DeleteChildren* 
operation +// +kubebuilder:object:generate=false +// +k8s:conversion-gen=false +type DeleteStatus struct { + complete bool + objects []client.Object +} + +// Complete returns true if the delete is complete, and false otherwise +func (d *DeleteStatus) Complete() bool { return d.complete } + +// Info returns key/value pairs that describe the delete status operation; the returned array +// must alternate string keys and arbitrary values so it can be passed to logr.Logging.Info() +func (d *DeleteStatus) Info() []interface{} { + args := make([]interface{}, 0) + args = append(args, "complete", d.complete) + + if len(d.objects) >= 1 { + args = append(args, "object", client.ObjectKeyFromObject(d.objects[0]).String()) + } + if len(d.objects) > 1 { + args = append(args, "count", len(d.objects)) + } + + return args +} + +var deleteRetry = DeleteStatus{complete: false} +var deleteComplete = DeleteStatus{complete: true} + +func (d DeleteStatus) withObject(obj client.Object) DeleteStatus { + d.objects = []client.Object{obj} + return d +} + +func (d DeleteStatus) withObjectList(objs []client.Object) DeleteStatus { + d.objects = objs + return d +} + +// deleteChildrenSingle deletes all the children of a single type. Children are found using +// the owner labels +func deleteChildrenSingle(ctx context.Context, c client.Client, childObjectList ObjectList, parent metav1.Object, matchingLabels client.MatchingLabels) (DeleteStatus, error) { + // List all the children and filter by the owner labels + err := c.List(ctx, childObjectList.(client.ObjectList), matchingLabels) + if err != nil { + return deleteRetry, err + } + + objectList := childObjectList.GetObjectList() + if len(objectList) == 0 { + return deleteComplete, nil + } + + // Check whether the child objects span multiple namespaces. 
+ multipleNamespaces := false + namespace := "" + for _, obj := range objectList { + if obj.GetNamespace() != namespace && namespace != "" { + multipleNamespaces = true + } + + namespace = obj.GetNamespace() + + // Wait for any deletes to finish if the resource is already marked for deletion + if !obj.GetDeletionTimestamp().IsZero() { + return deleteRetry.withObject(obj), nil + } + } + + // If all the child resources are in a single namespace then we can use DeleteAllOf + if !multipleNamespaces { + err = c.DeleteAllOf(ctx, objectList[0], client.InNamespace(namespace), matchingLabels) + if err != nil { + return deleteRetry, err + } + + return deleteRetry.withObjectList(objectList), nil + } + + // If the child resources span multiple namespaces, then we have to delete them + // each individually. + g := new(errgroup.Group) + for _, obj := range objectList { + obj := obj + + // Start a goroutine for each object to delete + g.Go(func() error { + return c.Delete(ctx, obj) + }) + } + + return deleteRetry.withObjectList(objectList), g.Wait() +} + +// DeleteChildrenWithLabels deletes all the children of a parent with the resource types defined +// in a list of ObjectList types and the labels defined in matchingLabels. All children of a +// single type will be fully deleted before starting to delete any children of the next type. 
+func DeleteChildrenWithLabels(ctx context.Context, c client.Client, childObjectLists []ObjectList, parent metav1.Object, matchingLabels client.MatchingLabels) (DeleteStatus, error) { + for label, value := range MatchingOwner(parent) { + matchingLabels[label] = value + } + + for _, childObjectList := range childObjectLists { + deleteStatus, err := deleteChildrenSingle(ctx, c, childObjectList, parent, matchingLabels) + if err != nil { + return deleteRetry, err + } + + if !deleteStatus.Complete() { + return deleteStatus, nil + } + } + + return deleteComplete, nil +} + +// DeleteChildren deletes all the children of a parent with the resource types defined +// in a list of ObjectList types. All children of a single type will be fully deleted +// before starting to delete any children of the next type. +func DeleteChildren(ctx context.Context, c client.Client, childObjectLists []ObjectList, parent metav1.Object) (DeleteStatus, error) { + return DeleteChildrenWithLabels(ctx, c, childObjectLists, parent, client.MatchingLabels(map[string]string{})) +} + +func OwnerLabelMapFunc(ctx context.Context, o client.Object) []reconcile.Request { + labels := o.GetLabels() + + ownerName, exists := labels[OwnerNameLabel] + if exists == false { + return []reconcile.Request{} + } + + ownerNamespace, exists := labels[OwnerNamespaceLabel] + if exists == false { + return []reconcile.Request{} + } + + return []reconcile.Request{ + {NamespacedName: types.NamespacedName{ + Name: ownerName, + Namespace: ownerNamespace, + }}, + } +} diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/persistentstorageinstance_types.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/persistentstorageinstance_types.go new file mode 100644 index 000000000..d08fa2509 --- /dev/null +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/persistentstorageinstance_types.go @@ -0,0 +1,131 @@ +/* + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP + * Other additional 
copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha3 + +import ( + "github.com/DataWorkflowServices/dws/utils/updater" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + // PersistentStorageNameLabel is defined for resources that relate to the name of a DWS PersistentStorageInstance + PersistentStorageNameLabel = "dataworkflowservices.github.io/persistentstorage.name" + + // PersistentStorageNamespaceLabel is defined for resources that relate to the namespace of a DWS PersistentStorageInstance + PersistentStorageNamespaceLabel = "dataworkflowservices.github.io/persistentstorage.namespace" +) + +// PersistentStorageInstanceState specifies the golang type for PSIState +type PersistentStorageInstanceState string + +// State enumerations +const ( + // The storage and filesystem represented by the PSI exists and is ready for use + PSIStateEnabled PersistentStorageInstanceState = "Enabled" + + // A #DW destroy_persistent directive has been issued in a workflow. + // Once all other workflows with persistent_dw reservations on the PSI complete, the PSI will be destroyed. + // New #DW persistent_dw requests after the PSI enters the 'destroying' state will fail. 
+ PSIStateDisabled PersistentStorageInstanceState = "Disabled" +) + +// PersistentStorageInstanceSpec defines the desired state of PersistentStorageInstance +type PersistentStorageInstanceSpec struct { + // Name is the name given to this persistent storage instance. + Name string `json:"name"` + + // FsType describes the File System Type for this storage instance. + // +kubebuilder:validation:Enum:=raw;xfs;gfs2;lustre + FsType string `json:"fsType"` + + // DWDirective is a copy of the #DW for this instance + DWDirective string `json:"dwDirective"` + + // User ID of the user that created the persistent storage + UserID uint32 `json:"userID"` + + // Desired state of the PersistentStorageInstance + // +kubebuilder:validation:Enum:=Enabled;Disabled + State PersistentStorageInstanceState `json:"state"` + + // List of consumers using this persistent storage + ConsumerReferences []corev1.ObjectReference `json:"consumerReferences,omitempty"` +} + +// PersistentStorageInstanceStatus defines the observed state of PersistentStorageInstance +type PersistentStorageInstanceStatus struct { + // Servers refers to the Servers resource that provides the backing storage for this storage instance + Servers corev1.ObjectReference `json:"servers,omitempty"` + + // Current state of the PersistentStorageInstance + // +kubebuilder:validation:Enum:=Enabled;Disabled + State PersistentStorageInstanceState `json:"state"` + + Ready bool `json:"ready"` + + // Error information + ResourceError `json:",inline"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:storageversion +//+kubebuilder:subresource:status +//+kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" +//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" + +// PersistentStorageInstance is the Schema for the Persistentstorageinstances API +type PersistentStorageInstance struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + 
Spec PersistentStorageInstanceSpec `json:"spec,omitempty"` + Status PersistentStorageInstanceStatus `json:"status,omitempty"` +} + +func (psi *PersistentStorageInstance) GetStatus() updater.Status[*PersistentStorageInstanceStatus] { + return &psi.Status +} + +//+kubebuilder:object:root=true + +// PersistentStorageInstanceList contains a list of PersistentStorageInstances +type PersistentStorageInstanceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PersistentStorageInstance `json:"items"` +} + +// GetObjectList returns a list of PersistentStorageInstance references. +func (p *PersistentStorageInstanceList) GetObjectList() []client.Object { + objectList := []client.Object{} + + for i := range p.Items { + objectList = append(objectList, &p.Items[i]) + } + + return objectList +} + +func init() { + SchemeBuilder.Register(&PersistentStorageInstance{}, &PersistentStorageInstanceList{}) +} diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/persistentstorageinstance_webhook.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/persistentstorageinstance_webhook.go similarity index 98% rename from vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/persistentstorageinstance_webhook.go rename to vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/persistentstorageinstance_webhook.go index b07b076c8..d20d56e43 100644 --- a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/persistentstorageinstance_webhook.go +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/persistentstorageinstance_webhook.go @@ -17,7 +17,7 @@ * limitations under the License. 
*/ -package v1alpha2 +package v1alpha3 import ( ctrl "sigs.k8s.io/controller-runtime" diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/resource.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/resource.go new file mode 100644 index 000000000..bac185151 --- /dev/null +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/resource.go @@ -0,0 +1,74 @@ +/* + * Copyright 2023-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha3 + +// ResourceState is the enumeration of the state of a DWS resource +// +kubebuilder:validation:Enum:=Enabled;Disabled +type ResourceState string + +const ( + // NOTE: Any new enumeration values must update the related kubebuilder validation + + // Enabled means the resource shall be enabled. + EnabledState ResourceState = "Enabled" + + // Disabled means the resource shall be disabled. + DisabledState ResourceState = "Disabled" +) + +// ResourceStatus is the enumeration of the status of a DWS resource +// +kubebuilder:validation:Enum:=Starting;Ready;Disabled;NotPresent;Offline;Failed;Degraded;Drained;Unknown +type ResourceStatus string + +const ( + // NOTE: Any new enumeration values must update the related kubebuilder validation + + // Starting means the resource is currently starting prior to becoming ready. 
+ StartingStatus ResourceStatus = "Starting" + + // Ready means the resource is fully operational and ready for use. + ReadyStatus ResourceStatus = "Ready" + + // Disabled means the resource is present but disabled by an administrator or external + // user. + DisabledStatus ResourceStatus = "Disabled" + + // NotPresent means the resource is not present within the system, likely because + // it is missing or powered down. This differs from the Offline state in that the + // resource is not known to exist. + NotPresentStatus ResourceStatus = "NotPresent" + + // Offline means the resource is offline and cannot be communicated with. This differs + // from the NotPresent state in that the resource is known to exist. + OfflineStatus ResourceStatus = "Offline" + + // Failed means the resource has failed during startup or execution. + FailedStatus ResourceStatus = "Failed" + + // Degraded means the resource is ready but operating in a degraded state. Certain + // recovery actions may alleviate a degraded status. + DegradedStatus ResourceStatus = "Degraded" + + // Drained means the resource has had its pods drained from the node. + DrainedStatus ResourceStatus = "Drained" + + // Unknown means the resource status is unknown. + UnknownStatus ResourceStatus = "Unknown" +) diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/resource_error.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/resource_error.go new file mode 100644 index 000000000..2df40eb70 --- /dev/null +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/resource_error.go @@ -0,0 +1,207 @@ +/* + * Copyright 2022-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
+ * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha3 + +import ( + "fmt" + "strings" + + "github.com/go-logr/logr" +) + +type ResourceErrorSeverity string +type ResourceErrorType string + +const ( + // Minor errors are very likely to eventually succeed (e.g., errors caused by a stale cache) + // The WLM doesn't see these errors directly. The workflow stays in the DriverWait state, and + // the error string is put in workflow.Status.Message. + SeverityMinor ResourceErrorSeverity = "Minor" + + // Major errors may or may not succeed. These are transient errors that could be persistent + // due to an underlying problem (e.g., errors from OS calls) + SeverityMajor ResourceErrorSeverity = "Major" + + // Fatal errors will never succeed. 
This is for situations where we can guarantee that retrying + // will not fix the error (e.g., a DW directive that is not valid) + SeverityFatal ResourceErrorSeverity = "Fatal" +) + +const ( + // Internal errors are due to an error in the DWS/driver code + TypeInternal ResourceErrorType = "Internal" + + // WLM errors are due to an error with the input from the WLM + TypeWLM ResourceErrorType = "WLM" + + // User errors are due to an error with the input from a user + TypeUser ResourceErrorType = "User" +) + +type ResourceErrorInfo struct { + // Optional user facing message if the error is relevant to an end user + UserMessage string `json:"userMessage,omitempty"` + + // Internal debug message for the error + DebugMessage string `json:"debugMessage"` + + // Internal or user error + // +kubebuilder:validation:Enum=Internal;User;WLM + Type ResourceErrorType `json:"type"` + + // Indication of how severe the error is. Minor will likely succeed, Major may + // succeed, and Fatal will never succeed. + // +kubebuilder:validation:Enum=Minor;Major;Fatal + Severity ResourceErrorSeverity `json:"severity"` +} + +type ResourceError struct { + // Error information + Error *ResourceErrorInfo `json:"error,omitempty"` +} + +func NewResourceError(format string, a ...any) *ResourceErrorInfo { + return &ResourceErrorInfo{ + Type: TypeInternal, + Severity: SeverityMinor, + DebugMessage: fmt.Sprintf(format, a...), + } +} + +// A resource error can have an optional user message that is displayed in the workflow.Status.Message +// field. The user message of the lowest level error is all that's displayed. +func (e *ResourceErrorInfo) WithUserMessage(format string, a ...any) *ResourceErrorInfo { + // Only set the user message if it's empty. This prevents upper layers + // from overriding a user message set by a lower layer + if e.UserMessage == "" { + e.UserMessage = fmt.Sprintf(format, a...) 
+ } + + return e +} + +func (e *ResourceErrorInfo) WithError(err error) *ResourceErrorInfo { + if err == nil { + return e + } + + // Concatenate the parent and child debug messages + debugMessageList := []string{} + if e.DebugMessage != "" { + debugMessageList = append(debugMessageList, e.DebugMessage) + } + + childError, ok := err.(*ResourceErrorInfo) + if ok { + // Inherit the severity and the user message if the child error is a ResourceError + e.Severity = childError.Severity + e.UserMessage = childError.UserMessage + e.Type = childError.Type + + // If the child resource error doesn't have a debug message, use the user message instead + if childError.DebugMessage == "" { + debugMessageList = append(debugMessageList, childError.UserMessage) + } else { + debugMessageList = append(debugMessageList, childError.DebugMessage) + } + } else { + debugMessageList = append(debugMessageList, err.Error()) + } + + e.DebugMessage = strings.Join(debugMessageList, ": ") + + return e +} + +func (e *ResourceErrorInfo) WithFatal() *ResourceErrorInfo { + e.Severity = SeverityFatal + return e +} + +func (e *ResourceErrorInfo) WithMajor() *ResourceErrorInfo { + if e.Severity != SeverityFatal { + e.Severity = SeverityMajor + } + return e +} + +func (e *ResourceErrorInfo) WithMinor() *ResourceErrorInfo { + if e.Severity != SeverityFatal && e.Severity != SeverityMajor { + e.Severity = SeverityMinor + } + return e +} + +func (e *ResourceErrorInfo) WithInternal() *ResourceErrorInfo { + e.Type = TypeInternal + return e +} + +func (e *ResourceErrorInfo) WithWLM() *ResourceErrorInfo { + e.Type = TypeWLM + return e +} + +func (e *ResourceErrorInfo) WithUser() *ResourceErrorInfo { + e.Type = TypeUser + return e +} + +func (e *ResourceErrorInfo) Error() string { + message := "" + if e.DebugMessage == "" { + message = e.UserMessage + } else { + message = e.DebugMessage + } + return fmt.Sprintf("%s error: %s", strings.ToLower(string(e.Type)), message) +} + +func (e *ResourceErrorInfo) 
GetUserMessage() string { + return fmt.Sprintf("%s error: %s", string(e.Type), e.UserMessage) +} + +func (e *ResourceError) SetResourceErrorAndLog(err error, log logr.Logger) { + e.SetResourceError(err) + if err == nil { + return + } + + childError, ok := err.(*ResourceErrorInfo) + if ok { + if childError.Severity == SeverityFatal { + log.Error(err, "Fatal error") + return + } + + log.Info("Recoverable Error", "Severity", childError.Severity, "Message", err.Error()) + return + } + + log.Info("Recoverable Error", "Message", err.Error()) +} + +func (e *ResourceError) SetResourceError(err error) { + if err == nil { + e.Error = nil + } else { + e.Error = NewResourceError("").WithError(err) + } +} diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/servers_types.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/servers_types.go new file mode 100644 index 000000000..238893026 --- /dev/null +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/servers_types.go @@ -0,0 +1,131 @@ +/* + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v1alpha3 + +import ( + "github.com/DataWorkflowServices/dws/utils/updater" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Important: Run "make" to regenerate code after modifying this file + +// ServersSpecStorage specifies info required to identify the storage to +// use, and the number of allocations to make on that storage. +// ServersSpecAllocationSet.AllocationSize specifies the size of each allocation. +type ServersSpecStorage struct { + // The name of the storage + Name string `json:"name"` + + // The number of allocations to create of the size in bytes specified in ServersSpecAllocationSet + // +kubebuilder:validation:Minimum=1 + AllocationCount int `json:"allocationCount"` +} + +// ServersSpecAllocationSet is a set of allocations that all share the same allocation +// size and allocation type (e.g., XFS) +type ServersSpecAllocationSet struct { + // Label as specified in the DirectiveBreakdown + Label string `json:"label"` + + // Allocation size in bytes + // +kubebuilder:validation:Minimum=1 + AllocationSize int64 `json:"allocationSize"` + + // List of storage resources where allocations are created + Storage []ServersSpecStorage `json:"storage"` +} + +// ServersSpec defines the desired state of Servers +type ServersSpec struct { + AllocationSets []ServersSpecAllocationSet `json:"allocationSets,omitempty"` +} + +// ServersStatusStorage is the status of the allocations on a storage +type ServersStatusStorage struct { + // Allocation size in bytes + AllocationSize int64 `json:"allocationSize"` +} + +// ServersStatusAllocationSet is the status of a set of allocations +type ServersStatusAllocationSet struct { + // Label as specified in the DirectiveBreakdown + Label string `json:"label"` + + // List of storage resources that have allocations + Storage map[string]ServersStatusStorage `json:"storage"` +} + +// ServersStatus specifies whether the Servers has achieved the +// ready condition 
along with the allocationSets that are managed +// by the Servers resource. +type ServersStatus struct { + Ready bool `json:"ready"` + LastUpdate *metav1.MicroTime `json:"lastUpdate,omitempty"` + AllocationSets []ServersStatusAllocationSet `json:"allocationSets,omitempty"` + + // Error information + ResourceError `json:",inline"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:storageversion +//+kubebuilder:subresource:status +//+kubebuilder:printcolumn:name="READY",type="boolean",JSONPath=".status.ready",description="True if allocation sets have been generated" +//+kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" +//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" + +// Servers is the Schema for the servers API +type Servers struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ServersSpec `json:"spec,omitempty"` + Status ServersStatus `json:"status,omitempty"` +} + +func (s *Servers) GetStatus() updater.Status[*ServersStatus] { + return &s.Status +} + +//+kubebuilder:object:root=true + +// ServersList contains a list of Servers +type ServersList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Servers `json:"items"` +} + +// GetObjectList returns a list of Servers references. 
+func (s *ServersList) GetObjectList() []client.Object { + objectList := []client.Object{} + + for i := range s.Items { + objectList = append(objectList, &s.Items[i]) + } + + return objectList +} + +func init() { + SchemeBuilder.Register(&Servers{}, &ServersList{}) +} diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/servers_webhook.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/servers_webhook.go similarity index 98% rename from vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/servers_webhook.go rename to vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/servers_webhook.go index 2a834f6c3..f0bf191b6 100644 --- a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/servers_webhook.go +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/servers_webhook.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha2 +package v1alpha3 import ( ctrl "sigs.k8s.io/controller-runtime" diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/storage_types.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/storage_types.go new file mode 100644 index 000000000..9b3cf9ff6 --- /dev/null +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/storage_types.go @@ -0,0 +1,181 @@ +/* + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha3 + +import ( + "github.com/DataWorkflowServices/dws/utils/updater" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + // StorageTypeLabel is the label key used for tagging Storage resources + // with a driver specific label. For example: dataworkflowservices.github.io/storage=Rabbit + StorageTypeLabel = "dataworkflowservices.github.io/storage" +) + +// StorageSpec defines the desired specifications of Storage resource +type StorageSpec struct { + // State describes the desired state of the Storage resource. + // +kubebuilder:default:=Enabled + State ResourceState `json:"state,omitempty"` + + // Mode indicates whether the resource is live and is being updated + // by the reconcilers or whether it is in testing mode and is being + // updated manually. + // +kubebuilder:validation:Enum:=Live;Testing + // +kubebuilder:default:=Live + Mode string `json:"mode,omitempty"` +} + +// StorageDevice contains the details of the storage hardware +type StorageDevice struct { + // Model is the manufacturer information about the device + Model string `json:"model,omitempty"` + + // The serial number for this storage controller. + SerialNumber string `json:"serialNumber,omitempty"` + + // The firmware version of this storage controller. + FirmwareVersion string `json:"firmwareVersion,omitempty"` + + // Physical slot location of the storage controller. + Slot string `json:"slot,omitempty"` + + // Capacity in bytes of the device. The full capacity may not + // be usable depending on what the storage driver can provide. + Capacity int64 `json:"capacity,omitempty"` + + // WearLevel in percent for SSDs. A value of 100 indicates the estimated endurance of the non-volatile memory + // has been consumed, but may not indicate a storage failure. 
+ WearLevel *int64 `json:"wearLevel,omitempty"` + + // Status of the individual device + Status ResourceStatus `json:"status,omitempty"` +} + +// Node provides the status of either a compute or a server +type Node struct { + // Name is the Kubernetes name of the node + Name string `json:"name,omitempty"` + + // Status of the node + Status ResourceStatus `json:"status,omitempty"` +} + +// StorageAccessProtocol is the enumeration of supported protocols. +// +kubebuilder:validation:Enum:=PCIe +type StorageAccessProtocol string + +const ( + PCIe StorageAccessProtocol = "PCIe" +) + +// StorageAccess contains nodes and the protocol that may access the storage +type StorageAccess struct { + // Protocol is the method that this storage can be accessed + Protocol StorageAccessProtocol `json:"protocol,omitempty"` + + // Servers is the list of non-compute nodes that have access to the storage + Servers []Node `json:"servers,omitempty"` + + // Computes is the list of compute nodes that have access to the storage + Computes []Node `json:"computes,omitempty"` +} + +// StorageType is the enumeration of storage types. +// +kubebuilder:validation:Enum:=NVMe +type StorageType string + +const ( + NVMe StorageType = "NVMe" +) + +// StorageData contains the data about the storage +type StorageStatus struct { + // Type describes what type of storage this is + Type StorageType `json:"type,omitempty"` + + // Devices is the list of physical devices that make up this storage + Devices []StorageDevice `json:"devices,omitempty"` + + // Access contains the information about where the storage is accessible + Access StorageAccess `json:"access,omitempty"` + + // Capacity is the number of bytes this storage provides. This is the + // total accessible bytes as determined by the driver and may be different + // than the sum of the devices' capacities. 
+ // +kubebuilder:default:=0 + Capacity int64 `json:"capacity"` + + // Status is the overall status of the storage + Status ResourceStatus `json:"status,omitempty"` + + // Reboot Required is true if the node requires a reboot and false otherwise. A reboot my be + // necessary to recover from certain hardware failures or high-availability clustering events. + RebootRequired bool `json:"rebootRequired,omitempty"` + + // Message provides additional details on the current status of the resource + Message string `json:"message,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:storageversion +//+kubebuilder:subresource:status +//+kubebuilder:printcolumn:name="State",type="string",JSONPath=".spec.state",description="State of the storage resource" +//+kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.status",description="Status of the storage resource" +//+kubebuilder:printcolumn:name="Mode",type="string",JSONPath=".spec.mode",description="State of live updates" +//+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" + +// Storage is the Schema for the storages API +type Storage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec StorageSpec `json:"spec"` + Status StorageStatus `json:"status,omitempty"` +} + +func (s *Storage) GetStatus() updater.Status[*StorageStatus] { + return &s.Status +} + +//+kubebuilder:object:root=true + +// StorageList contains a list of Storage +type StorageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Storage `json:"items"` +} + +func (s *StorageList) GetObjectList() []client.Object { + objectList := []client.Object{} + + for i := range s.Items { + objectList = append(objectList, &s.Items[i]) + } + + return objectList +} + +func init() { + SchemeBuilder.Register(&Storage{}, &StorageList{}) +} diff --git 
a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/storage_webhook.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/storage_webhook.go similarity index 98% rename from vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/storage_webhook.go rename to vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/storage_webhook.go index 4c3ae474a..a2ef10bd7 100644 --- a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/storage_webhook.go +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/storage_webhook.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha2 +package v1alpha3 import ( ctrl "sigs.k8s.io/controller-runtime" diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/systemconfiguration_types.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/systemconfiguration_types.go new file mode 100644 index 000000000..a7ef2708d --- /dev/null +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/systemconfiguration_types.go @@ -0,0 +1,156 @@ +/* + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v1alpha3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/DataWorkflowServices/dws/utils/updater" +) + +// SystemConfigurationExternalComputeNode describes a compute node that is +// not directly matched with any of the nodes in the StorageNodes list. +type SystemConfigurationExternalComputeNode struct { + // Name of the compute node + Name string `json:"name"` +} + +// SystemConfigurationComputeNodeReference describes a compute node that +// has access to a server. +type SystemConfigurationComputeNodeReference struct { + // Name of the compute node + Name string `json:"name"` + + // Index of the compute node from the server + Index int `json:"index"` +} + +// SystemConfigurationStorageNode describes a storage node in the system +type SystemConfigurationStorageNode struct { + // Type is the type of server + Type string `json:"type"` + + // Name of the server node + Name string `json:"name"` + + // ComputesAccess is the list of compute nodes that can use the server + ComputesAccess []SystemConfigurationComputeNodeReference `json:"computesAccess,omitempty"` +} + +// SystemConfigurationSpec describes the node layout of the system. This is filled in by +// an administrator at software installation time. +type SystemConfigurationSpec struct { + // ExternalComputeNodes is the list of compute nodes that are not + // directly matched with any of the StorageNodes. + ExternalComputeNodes []SystemConfigurationExternalComputeNode `json:"externalComputeNodes,omitempty"` + + // StorageNodes is the list of storage nodes on the system + StorageNodes []SystemConfigurationStorageNode `json:"storageNodes,omitempty"` + + // Ports is the list of ports available for communication between nodes in the system. 
+ // Valid values are single integers, or a range of values of the form "START-END" where + // START is an integer value that represents the start of a port range and END is an + // integer value that represents the end of the port range (inclusive). + Ports []intstr.IntOrString `json:"ports,omitempty"` + + // PortsCooldownInSeconds is the number of seconds to wait before a port can be reused. Defaults + // to 60 seconds (to match the typical value for the kernel's TIME_WAIT). A value of 0 means the + // ports can be reused immediately. + // +kubebuilder:default:=60 + PortsCooldownInSeconds int `json:"portsCooldownInSeconds"` +} + +// SystemConfigurationStatus defines the status of SystemConfiguration +type SystemConfigurationStatus struct { + // Ready indicates when the SystemConfiguration has been reconciled + Ready bool `json:"ready"` + + // Error information + ResourceError `json:",inline"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:storageversion +//+kubebuilder:subresource:status +//+kubebuilder:printcolumn:name="READY",type="boolean",JSONPath=".status.ready",description="True if SystemConfiguration is reconciled" +//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" + +// SystemConfiguration is the Schema for the systemconfigurations API +type SystemConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec SystemConfigurationSpec `json:"spec,omitempty"` + Status SystemConfigurationStatus `json:"status,omitempty"` +} + +func (s *SystemConfiguration) GetStatus() updater.Status[*SystemConfigurationStatus] { + return &s.Status +} + +//+kubebuilder:object:root=true + +// SystemConfigurationList contains a list of SystemConfiguration +type SystemConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SystemConfiguration `json:"items"` +} + +func init() { + 
SchemeBuilder.Register(&SystemConfiguration{}, &SystemConfigurationList{}) +} + +// Computes returns the names of the computes attached to Rabbit nodes +func (in *SystemConfiguration) Computes() []*string { + // We expect that there can be a large number of compute nodes and we don't + // want to duplicate all of those names. + // So we'll walk spec.storageNodes twice so we can set the + // length/capacity for the array that will hold pointers to the names. + num := 0 + for i1 := range in.Spec.StorageNodes { + num += len(in.Spec.StorageNodes[i1].ComputesAccess) + } + + computes := make([]*string, num) + idx := 0 + for i2 := range in.Spec.StorageNodes { + for i3 := range in.Spec.StorageNodes[i2].ComputesAccess { + computes[idx] = &in.Spec.StorageNodes[i2].ComputesAccess[i3].Name + idx++ + } + } + return computes +} + +// ComputesExternal returns the names of the external compute nodes in the system +func (in *SystemConfiguration) ComputesExternal() []*string { + num := len(in.Spec.ExternalComputeNodes) + computes := make([]*string, num) + idx := 0 + + // Add the external computes. + for i := range in.Spec.ExternalComputeNodes { + computes[idx] = &in.Spec.ExternalComputeNodes[i].Name + idx++ + } + return computes +} diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/systemconfiguration_webhook.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/systemconfiguration_webhook.go similarity index 98% rename from vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/systemconfiguration_webhook.go rename to vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/systemconfiguration_webhook.go index dab68447f..853479386 100644 --- a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/systemconfiguration_webhook.go +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/systemconfiguration_webhook.go @@ -17,7 +17,7 @@ * limitations under the License. 
*/ -package v1alpha2 +package v1alpha3 import ( ctrl "sigs.k8s.io/controller-runtime" diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/workflow_types.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/workflow_types.go new file mode 100644 index 000000000..3c4e25177 --- /dev/null +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/workflow_types.go @@ -0,0 +1,289 @@ +/* + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v1alpha3 + +import ( + "fmt" + "strings" + + "github.com/DataWorkflowServices/dws/utils/updater" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +const ( + // WorkflowNameLabel is defined for resources that relate to the name of a DWS Workflow + WorkflowNameLabel = "dataworkflowservices.github.io/workflow.name" + + // WorkflowNamespaceLabel is defined for resources that relate to the namespace of a DWS Workflow + WorkflowNamespaceLabel = "dataworkflowservices.github.io/workflow.namespace" + + // WorkflowUIDLabel holds the UID of the parent workflow resource + WorkflowUidLabel = "dataworkflowservices.github.io/workflow.uid" +) + +// WorkflowState is the enumeration of the state of the workflow +// +kubebuilder:validation:Enum:=Proposal;Setup;DataIn;PreRun;PostRun;DataOut;Teardown +type WorkflowState string + +// WorkflowState values +const ( + StateProposal WorkflowState = "Proposal" + StateSetup WorkflowState = "Setup" + StateDataIn WorkflowState = "DataIn" + StatePreRun WorkflowState = "PreRun" + StatePostRun WorkflowState = "PostRun" + StateDataOut WorkflowState = "DataOut" + StateTeardown WorkflowState = "Teardown" +) + +// Next reports the next state after state s +func (s WorkflowState) next() WorkflowState { + switch s { + case "": + return StateProposal + case StateProposal: + return StateSetup + case StateSetup: + return StateDataIn + case StateDataIn: + return StatePreRun + case StatePreRun: + return StatePostRun + case StatePostRun: + return StateDataOut + case StateDataOut: + return StateTeardown + } + + panic(s) +} + +// Last reports whether the state s is the last state +func (s WorkflowState) last() bool { + return s == StateTeardown +} + +// After reports whether the state s is after t +func (s WorkflowState) after(t WorkflowState) bool { + + for !t.last() { + next := t.next() + if s == next { + return true + } + t = next + } + + return false +} + +// Strings 
associated with workflow statuses +const ( + StatusPending = "Pending" + StatusQueued = "Queued" + StatusRunning = "Running" + StatusCompleted = "Completed" + StatusTransientCondition = "TransientCondition" + StatusError = "Error" + StatusDriverWait = "DriverWait" +) + +// ToStatus will return a Status* string that goes with +// the given severity. +func (severity ResourceErrorSeverity) ToStatus() (string, error) { + switch severity { + case SeverityMinor: + return StatusRunning, nil + case SeverityMajor: + return StatusTransientCondition, nil + case SeverityFatal: + return StatusError, nil + default: + return "", fmt.Errorf("unknown severity: %s", string(severity)) + } +} + +// SeverityStringToStatus will return a Status* string that goes with +// the given severity. +// An empty severity string will be considered a minor severity. +func SeverityStringToStatus(severity string) (string, error) { + switch strings.ToLower(severity) { + case "", "minor": + return SeverityMinor.ToStatus() + case "major": + return SeverityMajor.ToStatus() + case "fatal": + return SeverityFatal.ToStatus() + default: + return "", fmt.Errorf("unknown severity: %s", severity) + } +} + +// WorkflowSpec defines the desired state of Workflow +type WorkflowSpec struct { + // Desired state for the workflow to be in. Unless progressing to the teardown state, + // this can only be set to the next state when the current desired state has been achieved. + DesiredState WorkflowState `json:"desiredState"` + + // WLMID identifies the Workflow Manager (WLM), and is set by the WLM + // when it creates the workflow resource. + WLMID string `json:"wlmID"` + + // JobID is the WLM job ID that corresponds to this workflow, and is + // set by the WLM when it creates the workflow resource. + JobID intstr.IntOrString `json:"jobID"` + + // UserID specifies the user ID for the workflow. The User ID is used by the various states + // in the workflow to ensure the user has permissions to perform certain actions. 
Used in + // conjunction with Group ID to run subtasks with UserID:GroupID credentials + UserID uint32 `json:"userID"` + + // GroupID specifies the group ID for the workflow. The Group ID is used by the various states + // in the workflow to ensure the group has permissions to perform certain actions. Used in + // conjunction with User ID to run subtasks with UserID:GroupID credentials. + GroupID uint32 `json:"groupID"` + + // Hurry indicates that the workflow's driver should kill the job in a hurry when this workflow enters its teardown state. + // The driver must release all resources and unmount any filesystems that were mounted as part of the workflow, though some drivers would have done this anyway as part of their teardown state. + // The driver must also kill any in-progress data transfers, or skip any data transfers that have not yet begun. + // +kubebuilder:default:=false + Hurry bool `json:"hurry,omitempty"` + + // List of #DW strings from a WLM job script + DWDirectives []string `json:"dwDirectives"` +} + +// WorkflowDriverStatus defines the status information provided by integration drivers. +type WorkflowDriverStatus struct { + DriverID string `json:"driverID"` + TaskID string `json:"taskID"` + DWDIndex int `json:"dwdIndex"` + + WatchState WorkflowState `json:"watchState"` + + LastHB int64 `json:"lastHB"` + Completed bool `json:"completed"` + + // User readable reason. + // For the CDS driver, this could be the state of the underlying + // data movement request + // +kubebuilder:validation:Enum=Pending;Queued;Running;Completed;TransientCondition;Error;DriverWait + Status string `json:"status,omitempty"` + + // Message provides additional details on the current status of the resource + Message string `json:"message,omitempty"` + + // Driver error string. 
This is not rolled up into the workflow's + // overall status section + Error string `json:"error,omitempty"` + + // CompleteTime reflects the time that the workflow reconciler marks the driver complete + CompleteTime *metav1.MicroTime `json:"completeTime,omitempty"` +} + +// WorkflowStatus defines the observed state of the Workflow +type WorkflowStatus struct { + // The state the resource is currently transitioning to. + // Updated by the controller once started. + State WorkflowState `json:"state,omitempty"` + + // Ready can be 'True', 'False' + // Indicates whether State has been reached. + Ready bool `json:"ready"` + + // User readable reason and status message. + // - Completed: The workflow has reached the state in workflow.Status.State. + // - DriverWait: The underlying drivers are currently running. + // - TransientCondition: A driver has encountered an error that might be recoverable. + // - Error: A driver has encountered an error that will not recover. + // +kubebuilder:validation:Enum=Completed;DriverWait;TransientCondition;Error + Status string `json:"status,omitempty"` + + // Message provides additional details on the current status of the resource + Message string `json:"message,omitempty"` + + // Set of DW environment variable settings for WLM to apply to the job. + // - DW_JOB_STRIPED + // - DW_JOB_PRIVATE + // - DW_JOB_STRIPED_CACHE + // - DW_JOB_LDBAL_CACHE + // - DW_PERSISTENT_STRIPED_{resname} + Env map[string]string `json:"env,omitempty"` + + // List of registered drivers and related status. Updated by drivers. 
+ Drivers []WorkflowDriverStatus `json:"drivers,omitempty"` + + // List of #DW directive breakdowns indicating to WLM what to allocate on what Server + // 1 DirectiveBreakdown per #DW Directive that requires storage + DirectiveBreakdowns []corev1.ObjectReference `json:"directiveBreakdowns,omitempty"` + + // Reference to Computes + Computes corev1.ObjectReference `json:"computes,omitempty"` + + // Time of the most recent desiredState change + DesiredStateChange *metav1.MicroTime `json:"desiredStateChange,omitempty"` + + // Time of the most recent desiredState's achieving Ready status + ReadyChange *metav1.MicroTime `json:"readyChange,omitempty"` + + // Duration of the last state change + ElapsedTimeLastState string `json:"elapsedTimeLastState,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:storageversion +//+kubebuilder:printcolumn:name="STATE",type="string",JSONPath=".status.state",description="Current state" +//+kubebuilder:printcolumn:name="READY",type="boolean",JSONPath=".status.ready",description="True if current state is achieved" +//+kubebuilder:printcolumn:name="STATUS",type="string",JSONPath=".status.status",description="Indicates achievement of current state" +//+kubebuilder:printcolumn:name="JOBID",type="string",JSONPath=".spec.jobID",description="Job ID" +//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +//+kubebuilder:printcolumn:name="DESIREDSTATE",type="string",JSONPath=".spec.desiredState",description="Desired state",priority=1 +//+kubebuilder:printcolumn:name="DESIREDSTATECHANGE",type="date",JSONPath=".status.desiredStateChange",description="Time of most recent desiredState change",priority=1 +//+kubebuilder:printcolumn:name="ELAPSEDTIMELASTSTATE",type="string",JSONPath=".status.elapsedTimeLastState",description="Duration of last state change",priority=1 +//+kubebuilder:printcolumn:name="UID",type="string",JSONPath=".spec.userID",description="UID",priority=1 
+//+kubebuilder:printcolumn:name="GID",type="string",JSONPath=".spec.groupID",description="GID",priority=1 + +// Workflow is the Schema for the workflows API +type Workflow struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec WorkflowSpec `json:"spec,omitempty"` + Status WorkflowStatus `json:"status,omitempty"` +} + +func (c *Workflow) GetStatus() updater.Status[*WorkflowStatus] { + return &c.Status +} + +//+kubebuilder:object:root=true + +// WorkflowList contains a list of Workflows +type WorkflowList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Workflow `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Workflow{}, &WorkflowList{}) +} diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/workflow_webhook.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/workflow_webhook.go similarity index 97% rename from vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/workflow_webhook.go rename to vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/workflow_webhook.go index 8b3562dbd..06ac8abbe 100644 --- a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/workflow_webhook.go +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/workflow_webhook.go @@ -17,7 +17,7 @@ * limitations under the License. */ -package v1alpha2 +package v1alpha3 import ( "context" @@ -54,7 +54,7 @@ func (w *Workflow) SetupWebhookWithManager(mgr ctrl.Manager) error { // TODO(user): EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 
-//+kubebuilder:webhook:path=/mutate-dataworkflowservices-github-io-v1alpha2-workflow,mutating=true,failurePolicy=fail,sideEffects=None,groups=dataworkflowservices.github.io,resources=workflows,verbs=create,versions=v1alpha2,name=mworkflow.kb.io,admissionReviewVersions={v1,v1beta1} +//+kubebuilder:webhook:path=/mutate-dataworkflowservices-github-io-v1alpha3-workflow,mutating=true,failurePolicy=fail,sideEffects=None,groups=dataworkflowservices.github.io,resources=workflows,verbs=create,versions=v1alpha3,name=mworkflow.kb.io,admissionReviewVersions={v1,v1beta1} var _ webhook.Defaulter = &Workflow{} @@ -75,7 +75,7 @@ func (w *Workflow) Default() { // TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. // NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here. // Modifying the path for an invalid path can cause API server errors; failing to locate the webhook. -//+kubebuilder:webhook:path=/validate-dataworkflowservices-github-io-v1alpha2-workflow,mutating=false,failurePolicy=fail,sideEffects=None,groups=dataworkflowservices.github.io,resources=workflows,verbs=create;update,versions=v1alpha2,name=vworkflow.kb.io,admissionReviewVersions={v1,v1beta1} +//+kubebuilder:webhook:path=/validate-dataworkflowservices-github-io-v1alpha3-workflow,mutating=false,failurePolicy=fail,sideEffects=None,groups=dataworkflowservices.github.io,resources=workflows,verbs=create;update,versions=v1alpha3,name=vworkflow.kb.io,admissionReviewVersions={v1,v1beta1} var _ webhook.Validator = &Workflow{} diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/zz_generated.deepcopy.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/zz_generated.deepcopy.go new file mode 100644 index 000000000..d558ad8da --- /dev/null +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha3/zz_generated.deepcopy.go @@ -0,0 +1,1426 @@ +//go:build !ignore_autogenerated + +/* + * Copyright 2024 Hewlett 
Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + "github.com/DataWorkflowServices/dws/utils/dwdparse" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllocationSetColocationConstraint) DeepCopyInto(out *AllocationSetColocationConstraint) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationSetColocationConstraint. +func (in *AllocationSetColocationConstraint) DeepCopy() *AllocationSetColocationConstraint { + if in == nil { + return nil + } + out := new(AllocationSetColocationConstraint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AllocationSetConstraints) DeepCopyInto(out *AllocationSetConstraints) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Colocation != nil { + in, out := &in.Colocation, &out.Colocation + *out = make([]AllocationSetColocationConstraint, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationSetConstraints. +func (in *AllocationSetConstraints) DeepCopy() *AllocationSetConstraints { + if in == nil { + return nil + } + out := new(AllocationSetConstraints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientMount) DeepCopyInto(out *ClientMount) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientMount. +func (in *ClientMount) DeepCopy() *ClientMount { + if in == nil { + return nil + } + out := new(ClientMount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClientMount) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClientMountDevice) DeepCopyInto(out *ClientMountDevice) { + *out = *in + if in.Lustre != nil { + in, out := &in.Lustre, &out.Lustre + *out = new(ClientMountDeviceLustre) + **out = **in + } + if in.LVM != nil { + in, out := &in.LVM, &out.LVM + *out = new(ClientMountDeviceLVM) + (*in).DeepCopyInto(*out) + } + if in.DeviceReference != nil { + in, out := &in.DeviceReference, &out.DeviceReference + *out = new(ClientMountDeviceReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientMountDevice. +func (in *ClientMountDevice) DeepCopy() *ClientMountDevice { + if in == nil { + return nil + } + out := new(ClientMountDevice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientMountDeviceLVM) DeepCopyInto(out *ClientMountDeviceLVM) { + *out = *in + if in.NVMeInfo != nil { + in, out := &in.NVMeInfo, &out.NVMeInfo + *out = make([]ClientMountNVMeDesc, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientMountDeviceLVM. +func (in *ClientMountDeviceLVM) DeepCopy() *ClientMountDeviceLVM { + if in == nil { + return nil + } + out := new(ClientMountDeviceLVM) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientMountDeviceLustre) DeepCopyInto(out *ClientMountDeviceLustre) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientMountDeviceLustre. +func (in *ClientMountDeviceLustre) DeepCopy() *ClientMountDeviceLustre { + if in == nil { + return nil + } + out := new(ClientMountDeviceLustre) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ClientMountDeviceReference) DeepCopyInto(out *ClientMountDeviceReference) { + *out = *in + out.ObjectReference = in.ObjectReference +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientMountDeviceReference. +func (in *ClientMountDeviceReference) DeepCopy() *ClientMountDeviceReference { + if in == nil { + return nil + } + out := new(ClientMountDeviceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientMountInfo) DeepCopyInto(out *ClientMountInfo) { + *out = *in + in.Device.DeepCopyInto(&out.Device) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientMountInfo. +func (in *ClientMountInfo) DeepCopy() *ClientMountInfo { + if in == nil { + return nil + } + out := new(ClientMountInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientMountInfoStatus) DeepCopyInto(out *ClientMountInfoStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientMountInfoStatus. +func (in *ClientMountInfoStatus) DeepCopy() *ClientMountInfoStatus { + if in == nil { + return nil + } + out := new(ClientMountInfoStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClientMountList) DeepCopyInto(out *ClientMountList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClientMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientMountList. +func (in *ClientMountList) DeepCopy() *ClientMountList { + if in == nil { + return nil + } + out := new(ClientMountList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClientMountList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientMountNVMeDesc) DeepCopyInto(out *ClientMountNVMeDesc) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientMountNVMeDesc. +func (in *ClientMountNVMeDesc) DeepCopy() *ClientMountNVMeDesc { + if in == nil { + return nil + } + out := new(ClientMountNVMeDesc) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientMountSpec) DeepCopyInto(out *ClientMountSpec) { + *out = *in + if in.Mounts != nil { + in, out := &in.Mounts, &out.Mounts + *out = make([]ClientMountInfo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientMountSpec. 
+func (in *ClientMountSpec) DeepCopy() *ClientMountSpec { + if in == nil { + return nil + } + out := new(ClientMountSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientMountStatus) DeepCopyInto(out *ClientMountStatus) { + *out = *in + if in.Mounts != nil { + in, out := &in.Mounts, &out.Mounts + *out = make([]ClientMountInfoStatus, len(*in)) + copy(*out, *in) + } + in.ResourceError.DeepCopyInto(&out.ResourceError) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientMountStatus. +func (in *ClientMountStatus) DeepCopy() *ClientMountStatus { + if in == nil { + return nil + } + out := new(ClientMountStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeBreakdown) DeepCopyInto(out *ComputeBreakdown) { + *out = *in + in.Constraints.DeepCopyInto(&out.Constraints) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeBreakdown. +func (in *ComputeBreakdown) DeepCopy() *ComputeBreakdown { + if in == nil { + return nil + } + out := new(ComputeBreakdown) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeConstraints) DeepCopyInto(out *ComputeConstraints) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = make([]ComputeLocationConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeConstraints. 
+func (in *ComputeConstraints) DeepCopy() *ComputeConstraints { + if in == nil { + return nil + } + out := new(ComputeConstraints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeLocationAccess) DeepCopyInto(out *ComputeLocationAccess) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeLocationAccess. +func (in *ComputeLocationAccess) DeepCopy() *ComputeLocationAccess { + if in == nil { + return nil + } + out := new(ComputeLocationAccess) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeLocationConstraint) DeepCopyInto(out *ComputeLocationConstraint) { + *out = *in + if in.Access != nil { + in, out := &in.Access, &out.Access + *out = make([]ComputeLocationAccess, len(*in)) + copy(*out, *in) + } + out.Reference = in.Reference +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeLocationConstraint. +func (in *ComputeLocationConstraint) DeepCopy() *ComputeLocationConstraint { + if in == nil { + return nil + } + out := new(ComputeLocationConstraint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Computes) DeepCopyInto(out *Computes) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]ComputesData, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Computes. 
+func (in *Computes) DeepCopy() *Computes { + if in == nil { + return nil + } + out := new(Computes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Computes) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputesData) DeepCopyInto(out *ComputesData) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputesData. +func (in *ComputesData) DeepCopy() *ComputesData { + if in == nil { + return nil + } + out := new(ComputesData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputesList) DeepCopyInto(out *ComputesList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Computes, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputesList. +func (in *ComputesList) DeepCopy() *ComputesList { + if in == nil { + return nil + } + out := new(ComputesList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ComputesList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DWDirectiveRule) DeepCopyInto(out *DWDirectiveRule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = make([]dwdparse.DWDirectiveRuleSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DWDirectiveRule. +func (in *DWDirectiveRule) DeepCopy() *DWDirectiveRule { + if in == nil { + return nil + } + out := new(DWDirectiveRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DWDirectiveRule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DWDirectiveRuleList) DeepCopyInto(out *DWDirectiveRuleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DWDirectiveRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DWDirectiveRuleList. +func (in *DWDirectiveRuleList) DeepCopy() *DWDirectiveRuleList { + if in == nil { + return nil + } + out := new(DWDirectiveRuleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DWDirectiveRuleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DirectiveBreakdown) DeepCopyInto(out *DirectiveBreakdown) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectiveBreakdown. +func (in *DirectiveBreakdown) DeepCopy() *DirectiveBreakdown { + if in == nil { + return nil + } + out := new(DirectiveBreakdown) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DirectiveBreakdown) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DirectiveBreakdownList) DeepCopyInto(out *DirectiveBreakdownList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DirectiveBreakdown, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectiveBreakdownList. +func (in *DirectiveBreakdownList) DeepCopy() *DirectiveBreakdownList { + if in == nil { + return nil + } + out := new(DirectiveBreakdownList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DirectiveBreakdownList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DirectiveBreakdownSpec) DeepCopyInto(out *DirectiveBreakdownSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectiveBreakdownSpec. +func (in *DirectiveBreakdownSpec) DeepCopy() *DirectiveBreakdownSpec { + if in == nil { + return nil + } + out := new(DirectiveBreakdownSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DirectiveBreakdownStatus) DeepCopyInto(out *DirectiveBreakdownStatus) { + *out = *in + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(StorageBreakdown) + (*in).DeepCopyInto(*out) + } + if in.Compute != nil { + in, out := &in.Compute, &out.Compute + *out = new(ComputeBreakdown) + (*in).DeepCopyInto(*out) + } + if in.RequiredDaemons != nil { + in, out := &in.RequiredDaemons, &out.RequiredDaemons + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.ResourceError.DeepCopyInto(&out.ResourceError) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectiveBreakdownStatus. +func (in *DirectiveBreakdownStatus) DeepCopy() *DirectiveBreakdownStatus { + if in == nil { + return nil + } + out := new(DirectiveBreakdownStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Node) DeepCopyInto(out *Node) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. +func (in *Node) DeepCopy() *Node { + if in == nil { + return nil + } + out := new(Node) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PersistentStorageInstance) DeepCopyInto(out *PersistentStorageInstance) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentStorageInstance. +func (in *PersistentStorageInstance) DeepCopy() *PersistentStorageInstance { + if in == nil { + return nil + } + out := new(PersistentStorageInstance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PersistentStorageInstance) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentStorageInstanceList) DeepCopyInto(out *PersistentStorageInstanceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PersistentStorageInstance, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentStorageInstanceList. +func (in *PersistentStorageInstanceList) DeepCopy() *PersistentStorageInstanceList { + if in == nil { + return nil + } + out := new(PersistentStorageInstanceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PersistentStorageInstanceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PersistentStorageInstanceSpec) DeepCopyInto(out *PersistentStorageInstanceSpec) { + *out = *in + if in.ConsumerReferences != nil { + in, out := &in.ConsumerReferences, &out.ConsumerReferences + *out = make([]v1.ObjectReference, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentStorageInstanceSpec. +func (in *PersistentStorageInstanceSpec) DeepCopy() *PersistentStorageInstanceSpec { + if in == nil { + return nil + } + out := new(PersistentStorageInstanceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentStorageInstanceStatus) DeepCopyInto(out *PersistentStorageInstanceStatus) { + *out = *in + out.Servers = in.Servers + in.ResourceError.DeepCopyInto(&out.ResourceError) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentStorageInstanceStatus. +func (in *PersistentStorageInstanceStatus) DeepCopy() *PersistentStorageInstanceStatus { + if in == nil { + return nil + } + out := new(PersistentStorageInstanceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceError) DeepCopyInto(out *ResourceError) { + *out = *in + if in.Error != nil { + in, out := &in.Error, &out.Error + *out = new(ResourceErrorInfo) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceError. +func (in *ResourceError) DeepCopy() *ResourceError { + if in == nil { + return nil + } + out := new(ResourceError) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceErrorInfo) DeepCopyInto(out *ResourceErrorInfo) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceErrorInfo. +func (in *ResourceErrorInfo) DeepCopy() *ResourceErrorInfo { + if in == nil { + return nil + } + out := new(ResourceErrorInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Servers) DeepCopyInto(out *Servers) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Servers. +func (in *Servers) DeepCopy() *Servers { + if in == nil { + return nil + } + out := new(Servers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Servers) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServersList) DeepCopyInto(out *ServersList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Servers, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServersList. +func (in *ServersList) DeepCopy() *ServersList { + if in == nil { + return nil + } + out := new(ServersList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ServersList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServersSpec) DeepCopyInto(out *ServersSpec) { + *out = *in + if in.AllocationSets != nil { + in, out := &in.AllocationSets, &out.AllocationSets + *out = make([]ServersSpecAllocationSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServersSpec. +func (in *ServersSpec) DeepCopy() *ServersSpec { + if in == nil { + return nil + } + out := new(ServersSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServersSpecAllocationSet) DeepCopyInto(out *ServersSpecAllocationSet) { + *out = *in + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = make([]ServersSpecStorage, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServersSpecAllocationSet. +func (in *ServersSpecAllocationSet) DeepCopy() *ServersSpecAllocationSet { + if in == nil { + return nil + } + out := new(ServersSpecAllocationSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServersSpecStorage) DeepCopyInto(out *ServersSpecStorage) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServersSpecStorage. 
+func (in *ServersSpecStorage) DeepCopy() *ServersSpecStorage { + if in == nil { + return nil + } + out := new(ServersSpecStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServersStatus) DeepCopyInto(out *ServersStatus) { + *out = *in + if in.LastUpdate != nil { + in, out := &in.LastUpdate, &out.LastUpdate + *out = (*in).DeepCopy() + } + if in.AllocationSets != nil { + in, out := &in.AllocationSets, &out.AllocationSets + *out = make([]ServersStatusAllocationSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.ResourceError.DeepCopyInto(&out.ResourceError) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServersStatus. +func (in *ServersStatus) DeepCopy() *ServersStatus { + if in == nil { + return nil + } + out := new(ServersStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServersStatusAllocationSet) DeepCopyInto(out *ServersStatusAllocationSet) { + *out = *in + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = make(map[string]ServersStatusStorage, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServersStatusAllocationSet. +func (in *ServersStatusAllocationSet) DeepCopy() *ServersStatusAllocationSet { + if in == nil { + return nil + } + out := new(ServersStatusAllocationSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServersStatusStorage) DeepCopyInto(out *ServersStatusStorage) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServersStatusStorage. +func (in *ServersStatusStorage) DeepCopy() *ServersStatusStorage { + if in == nil { + return nil + } + out := new(ServersStatusStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Storage) DeepCopyInto(out *Storage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage. +func (in *Storage) DeepCopy() *Storage { + if in == nil { + return nil + } + out := new(Storage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Storage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageAccess) DeepCopyInto(out *StorageAccess) { + *out = *in + if in.Servers != nil { + in, out := &in.Servers, &out.Servers + *out = make([]Node, len(*in)) + copy(*out, *in) + } + if in.Computes != nil { + in, out := &in.Computes, &out.Computes + *out = make([]Node, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAccess. +func (in *StorageAccess) DeepCopy() *StorageAccess { + if in == nil { + return nil + } + out := new(StorageAccess) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageAllocationSet) DeepCopyInto(out *StorageAllocationSet) { + *out = *in + in.Constraints.DeepCopyInto(&out.Constraints) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAllocationSet. +func (in *StorageAllocationSet) DeepCopy() *StorageAllocationSet { + if in == nil { + return nil + } + out := new(StorageAllocationSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageBreakdown) DeepCopyInto(out *StorageBreakdown) { + *out = *in + out.Reference = in.Reference + if in.AllocationSets != nil { + in, out := &in.AllocationSets, &out.AllocationSets + *out = make([]StorageAllocationSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageBreakdown. +func (in *StorageBreakdown) DeepCopy() *StorageBreakdown { + if in == nil { + return nil + } + out := new(StorageBreakdown) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageDevice) DeepCopyInto(out *StorageDevice) { + *out = *in + if in.WearLevel != nil { + in, out := &in.WearLevel, &out.WearLevel + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageDevice. +func (in *StorageDevice) DeepCopy() *StorageDevice { + if in == nil { + return nil + } + out := new(StorageDevice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageList) DeepCopyInto(out *StorageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Storage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageList. +func (in *StorageList) DeepCopy() *StorageList { + if in == nil { + return nil + } + out := new(StorageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageSpec) DeepCopyInto(out *StorageSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSpec. +func (in *StorageSpec) DeepCopy() *StorageSpec { + if in == nil { + return nil + } + out := new(StorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageStatus) DeepCopyInto(out *StorageStatus) { + *out = *in + if in.Devices != nil { + in, out := &in.Devices, &out.Devices + *out = make([]StorageDevice, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Access.DeepCopyInto(&out.Access) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageStatus. 
+func (in *StorageStatus) DeepCopy() *StorageStatus { + if in == nil { + return nil + } + out := new(StorageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SystemConfiguration) DeepCopyInto(out *SystemConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemConfiguration. +func (in *SystemConfiguration) DeepCopy() *SystemConfiguration { + if in == nil { + return nil + } + out := new(SystemConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SystemConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SystemConfigurationComputeNodeReference) DeepCopyInto(out *SystemConfigurationComputeNodeReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemConfigurationComputeNodeReference. +func (in *SystemConfigurationComputeNodeReference) DeepCopy() *SystemConfigurationComputeNodeReference { + if in == nil { + return nil + } + out := new(SystemConfigurationComputeNodeReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SystemConfigurationExternalComputeNode) DeepCopyInto(out *SystemConfigurationExternalComputeNode) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemConfigurationExternalComputeNode. +func (in *SystemConfigurationExternalComputeNode) DeepCopy() *SystemConfigurationExternalComputeNode { + if in == nil { + return nil + } + out := new(SystemConfigurationExternalComputeNode) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SystemConfigurationList) DeepCopyInto(out *SystemConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SystemConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemConfigurationList. +func (in *SystemConfigurationList) DeepCopy() *SystemConfigurationList { + if in == nil { + return nil + } + out := new(SystemConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SystemConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SystemConfigurationSpec) DeepCopyInto(out *SystemConfigurationSpec) { + *out = *in + if in.ExternalComputeNodes != nil { + in, out := &in.ExternalComputeNodes, &out.ExternalComputeNodes + *out = make([]SystemConfigurationExternalComputeNode, len(*in)) + copy(*out, *in) + } + if in.StorageNodes != nil { + in, out := &in.StorageNodes, &out.StorageNodes + *out = make([]SystemConfigurationStorageNode, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]intstr.IntOrString, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemConfigurationSpec. +func (in *SystemConfigurationSpec) DeepCopy() *SystemConfigurationSpec { + if in == nil { + return nil + } + out := new(SystemConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SystemConfigurationStatus) DeepCopyInto(out *SystemConfigurationStatus) { + *out = *in + in.ResourceError.DeepCopyInto(&out.ResourceError) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemConfigurationStatus. +func (in *SystemConfigurationStatus) DeepCopy() *SystemConfigurationStatus { + if in == nil { + return nil + } + out := new(SystemConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SystemConfigurationStorageNode) DeepCopyInto(out *SystemConfigurationStorageNode) { + *out = *in + if in.ComputesAccess != nil { + in, out := &in.ComputesAccess, &out.ComputesAccess + *out = make([]SystemConfigurationComputeNodeReference, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemConfigurationStorageNode. +func (in *SystemConfigurationStorageNode) DeepCopy() *SystemConfigurationStorageNode { + if in == nil { + return nil + } + out := new(SystemConfigurationStorageNode) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Workflow) DeepCopyInto(out *Workflow) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workflow. +func (in *Workflow) DeepCopy() *Workflow { + if in == nil { + return nil + } + out := new(Workflow) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Workflow) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowDriverStatus) DeepCopyInto(out *WorkflowDriverStatus) { + *out = *in + if in.CompleteTime != nil { + in, out := &in.CompleteTime, &out.CompleteTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowDriverStatus. 
+func (in *WorkflowDriverStatus) DeepCopy() *WorkflowDriverStatus { + if in == nil { + return nil + } + out := new(WorkflowDriverStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowList) DeepCopyInto(out *WorkflowList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Workflow, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowList. +func (in *WorkflowList) DeepCopy() *WorkflowList { + if in == nil { + return nil + } + out := new(WorkflowList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WorkflowList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowSpec) DeepCopyInto(out *WorkflowSpec) { + *out = *in + out.JobID = in.JobID + if in.DWDirectives != nil { + in, out := &in.DWDirectives, &out.DWDirectives + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowSpec. +func (in *WorkflowSpec) DeepCopy() *WorkflowSpec { + if in == nil { + return nil + } + out := new(WorkflowSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkflowStatus) DeepCopyInto(out *WorkflowStatus) { + *out = *in + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Drivers != nil { + in, out := &in.Drivers, &out.Drivers + *out = make([]WorkflowDriverStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DirectiveBreakdowns != nil { + in, out := &in.DirectiveBreakdowns, &out.DirectiveBreakdowns + *out = make([]v1.ObjectReference, len(*in)) + copy(*out, *in) + } + out.Computes = in.Computes + if in.DesiredStateChange != nil { + in, out := &in.DesiredStateChange, &out.DesiredStateChange + *out = (*in).DeepCopy() + } + if in.ReadyChange != nil { + in, out := &in.ReadyChange, &out.ReadyChange + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowStatus. +func (in *WorkflowStatus) DeepCopy() *WorkflowStatus { + if in == nil { + return nil + } + out := new(WorkflowStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_clientmounts.yaml b/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_clientmounts.yaml index 440537793..261f34a3d 100644 --- a/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_clientmounts.yaml +++ b/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_clientmounts.yaml @@ -569,6 +569,301 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The desired state + jsonPath: .spec.desiredState + name: DESIREDSTATE + type: string + - description: True if desired state is achieved + jsonPath: .status.allReady + name: READY + type: boolean + - jsonPath: .status.error.severity + 
name: ERROR + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha3 + schema: + openAPIV3Schema: + description: ClientMount is the Schema for the clientmounts API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClientMountSpec defines the desired state of ClientMount + properties: + desiredState: + description: Desired state of the mount point + enum: + - mounted + - unmounted + type: string + mounts: + description: List of mounts to create on this client + items: + description: ClientMountInfo defines a single mount + properties: + compute: + description: Compute is the name of the compute node which shares + this mount if present. Empty if not shared. + type: string + device: + description: Description of the device to mount + properties: + deviceReference: + description: |- + ClientMountDeviceReference is an reference to a different Kubernetes object + where device information can be found + properties: + data: + description: Optional private data for the driver + type: integer + objectReference: + description: Object reference for the device information + properties: + apiVersion: + description: API version of the referent. 
+ type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + required: + - objectReference + type: object + lustre: + description: Lustre specific device information + properties: + fileSystemName: + description: Lustre fsname + type: string + mgsAddresses: + description: List of mgsAddresses of the form [address]@[lnet] + type: string + required: + - fileSystemName + - mgsAddresses + type: object + lvm: + description: LVM logical volume specific device information + properties: + deviceType: + description: Type of underlying block deices used for + the PVs + enum: + - nvme + type: string + logicalVolume: + description: LVM logical volume name + type: string + nvmeInfo: + description: List of NVMe namespaces that are used by + the VG + items: + description: ClientMountNVMeDesc uniquely describes + an NVMe namespace + properties: + deviceSerial: + description: Serial number of the base NVMe device + type: string + namespaceGUID: + description: Globally unique namespace ID + type: string + namespaceID: + description: Id of the Namespace on the NVMe device + (e.g., "2") + type: string + required: + - deviceSerial + - namespaceGUID + - namespaceID + type: object + type: array + volumeGroup: + description: LVM volume group name + type: string + required: + - deviceType + type: object + type: + description: ClientMountDeviceType specifies the go type + for device type + enum: + - lustre + - lvm + - reference + type: string + required: + - type + type: object + groupID: + description: GroupID to set for the mount + format: int32 + type: integer + mountPath: + description: Client path for mount target + type: string + options: + description: Options for the file system mount + type: string + setPermissions: + description: SetPermissions will set UserID and GroupID on the + mount if true + type: boolean + targetType: + description: TargetType determines whether the mount target + 
is a file or a directory + enum: + - file + - directory + type: string + type: + description: mount type + enum: + - lustre + - xfs + - gfs2 + - none + type: string + userID: + description: UserID to set for the mount + format: int32 + type: integer + required: + - device + - mountPath + - options + - setPermissions + - targetType + - type + type: object + minItems: 1 + type: array + node: + description: Name of the client node that is targeted by this mount + type: string + required: + - desiredState + - mounts + - node + type: object + status: + description: ClientMountStatus defines the observed state of ClientMount + properties: + allReady: + description: Rollup of each mounts ready status + type: boolean + error: + description: Error information + properties: + debugMessage: + description: Internal debug message for the error + type: string + severity: + description: |- + Indication of how severe the error is. Minor will likely succeed, Major may + succeed, and Fatal will never succeed. 
+ enum: + - Minor + - Major + - Fatal + type: string + type: + description: Internal or user error + enum: + - Internal + - User + - WLM + type: string + userMessage: + description: Optional user facing message if the error is relevant + to an end user + type: string + required: + - debugMessage + - severity + - type + type: object + mounts: + description: List of mount statuses + items: + description: ClientMountInfoStatus is the status for a single mount + point + properties: + ready: + description: Ready indicates whether status.state has been achieved + type: boolean + state: + description: Current state + enum: + - mounted + - unmounted + type: string + required: + - ready + - state + type: object + type: array + required: + - allReady + - mounts + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_computes.yaml b/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_computes.yaml index 64d156012..0df0233fd 100644 --- a/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_computes.yaml +++ b/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_computes.yaml @@ -52,6 +52,43 @@ spec: served: true storage: false - name: v1alpha2 + schema: + openAPIV3Schema: + description: Computes is the Schema for the computes API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + data: + items: + description: ComputesData defines the compute nodes that are assigned + to the workflow + properties: + name: + description: Name is the identifer name for the compute node + type: string + required: + - name + type: object + type: array + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + type: object + served: true + storage: false + - name: v1alpha3 schema: openAPIV3Schema: description: Computes is the Schema for the computes API diff --git a/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_directivebreakdowns.yaml b/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_directivebreakdowns.yaml index 63f9cde1e..1fee4813d 100644 --- a/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_directivebreakdowns.yaml +++ b/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_directivebreakdowns.yaml @@ -661,6 +661,343 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: True if allocation sets have been generated + jsonPath: .status.ready + name: READY + type: boolean + - jsonPath: .status.error.severity + name: ERROR + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha3 + schema: + openAPIV3Schema: + description: DirectiveBreakdown is the Schema for the directivebreakdown API + properties: + apiVersion: + description: |- + APIVersion 
defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DirectiveBreakdownSpec defines the directive string to breakdown + properties: + directive: + description: 'Directive is a copy of the #DW for this breakdown' + type: string + userID: + description: User ID of the user associated with the job + format: int32 + type: integer + required: + - directive + - userID + type: object + status: + description: DirectiveBreakdownStatus defines the storage information + WLM needs to select NNF Nodes and request storage from the selected + nodes + properties: + compute: + description: Compute is the compute breakdown for the directive + properties: + constraints: + description: Constraints to use when picking compute nodes + properties: + location: + description: Location is a list of location constraints + items: + description: |- + ComputeLocationConstraint describes a constraints on which compute nodes can be used with + a directive based on their location + properties: + access: + items: + properties: + priority: + description: Priority specifies whether the location + constraint is mandatory or best effort + enum: + - mandatory + - bestEffort + type: string + type: + description: Type is the relationship between + the compute nodes and the resource in the Reference + enum: + - physical + - network + type: string + required: + - priority + - type 
+ type: object + type: array + reference: + description: Reference is an object reference to a resource + that contains the location information + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + required: + - access + - reference + type: object + type: array + type: object + type: object + error: + description: Error information + properties: + debugMessage: + description: Internal debug message for the error + type: string + severity: + description: |- + Indication of how severe the error is. Minor will likely succeed, Major may + succeed, and Fatal will never succeed. + enum: + - Minor + - Major + - Fatal + type: string + type: + description: Internal or user error + enum: + - Internal + - User + - WLM + type: string + userMessage: + description: Optional user facing message if the error is relevant + to an end user + type: string + required: + - debugMessage + - severity + - type + type: object + ready: + description: Ready indicates whether AllocationSets have been generated + (true) or not (false) + type: boolean + requiredDaemons: + description: RequiredDeamons tells the WLM about any driver-specific + daemons it must enable for the job; it is assumed that the WLM knows + about the driver-specific daemons and that if the users are specifying + these then the WLM knows how to start them + items: + type: string + type: array + storage: + description: Storage is the storage breakdown for the directive + properties: + allocationSets: + description: 'AllocationSets lists the allocations required to + fulfill the #DW Directive' + items: + description: StorageAllocationSet defines the details of an + allocation set + properties: + allocationStrategy: + description: AllocationStrategy specifies the way to determine + the number of allocations of the MinimumCapacity required + for this AllocationSet. 
+ enum: + - AllocatePerCompute + - AllocateAcrossServers + - AllocateSingleServer + type: string + constraints: + description: |- + Constraint is an additional requirement pertaining to the suitability of Storage resources that may be used + for this AllocationSet + properties: + colocation: + description: |- + Colocation is a list of constraints for which Storage resources + to pick in relation to Storage resources for other allocation sets. + items: + description: |- + AllocationSetColocationConstraint specifies how to colocate storage resources. + A colocation constraint specifies how the location(s) of an allocation set should be + selected with relation to other allocation sets. Locations for allocation sets with the + same colocation key should be picked according to the colocation type. + properties: + key: + description: |- + Key shared by all the allocation sets that have their location constrained + in relation to each other. + type: string + type: + description: Type of colocation constraint + enum: + - exclusive + type: string + required: + - key + - type + type: object + type: array + count: + description: Count is the number of the allocations + to make + minimum: 1 + type: integer + labels: + description: Labels is a list of labels is used to filter + the Storage resources + items: + type: string + type: array + scale: + description: Scale is a hint for the number of allocations + to make based on a 1-10 value + maximum: 10 + minimum: 1 + type: integer + type: object + label: + description: |- + Label is an identifier used to communicate from the DWS interface to internal interfaces + the filesystem use of this AllocationSet. + enum: + - raw + - xfs + - gfs2 + - mgt + - mdt + - mgtmdt + - ost + type: string + minimumCapacity: + description: |- + MinimumCapacity is the minumum number of bytes required to meet the needs of the filesystem that + will use the storage. 
+ format: int64 + minimum: 1 + type: integer + required: + - allocationStrategy + - label + - minimumCapacity + type: object + type: array + lifetime: + description: Lifetime is the duration of the allocation + enum: + - job + - persistent + type: string + reference: + description: Reference is an ObjectReference to another resource + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + required: + - lifetime + type: object + required: + - ready + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_dwdirectiverules.yaml b/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_dwdirectiverules.yaml index 8d77c240d..154b7279f 100644 --- a/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_dwdirectiverules.yaml +++ b/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_dwdirectiverules.yaml @@ -95,6 +95,86 @@ spec: served: true storage: false - name: v1alpha2 + schema: + openAPIV3Schema: + description: DWDirectiveRule is the Schema for the DWDirective API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + items: + description: DWDirectiveRuleSpec defines the desired state of DWDirective + properties: + command: + description: 'Name of the #DW command. jobdw, stage_in, etc.' + type: string + driverLabel: + description: |- + Override for the Driver ID. 
If left empty this defaults to the + name of the DWDirectiveRule + type: string + ruleDefs: + description: 'List of key/value pairs this #DW command is expected + to have' + items: + description: DWDirectiveRuleDef defines the DWDirective parser + rules + properties: + isRequired: + type: boolean + isValueRequired: + type: boolean + key: + type: string + max: + type: integer + min: + type: integer + pattern: + type: string + patterns: + items: + type: string + type: array + type: + type: string + uniqueWithin: + type: string + required: + - key + - type + type: object + type: array + watchStates: + description: |- + Comma separated list of states that this rule wants to register for. + These watch states will result in an entry in the driver status array + in the Workflow resource + type: string + required: + - command + - ruleDefs + type: object + type: array + type: object + served: true + storage: false + - name: v1alpha3 schema: openAPIV3Schema: description: DWDirectiveRule is the Schema for the DWDirective API diff --git a/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_persistentstorageinstances.yaml b/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_persistentstorageinstances.yaml index 3ae7c4d53..a2f03cf24 100644 --- a/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_persistentstorageinstances.yaml +++ b/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_persistentstorageinstances.yaml @@ -441,6 +441,233 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.error.severity + name: ERROR + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha3 + schema: + openAPIV3Schema: + description: PersistentStorageInstance is the Schema for the Persistentstorageinstances + 
API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PersistentStorageInstanceSpec defines the desired state of + PersistentStorageInstance + properties: + consumerReferences: + description: List of consumers using this persistent storage + items: + description: |- + ObjectReference contains enough information to let you inspect or modify the referred object. + --- + New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. + 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. + 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular + restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". + Those cannot be well described when embedded. + 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. + 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity + during interpretation and require a REST mapping. 
In most cases, the dependency is on the group,resource tuple + and the version of the actual struct is irrelevant. + 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type + will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. + + + Instead of using this type, create a locally provided and used type that is well-focused on your reference. + For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + type: array + dwDirective: + description: 'DWDirective is a copy of the #DW for this instance' + type: string + fsType: + description: FsType describes the File System Type for this storage + instance. + enum: + - raw + - xfs + - gfs2 + - lustre + type: string + name: + description: Name is the name given to this persistent storage instance. + type: string + state: + description: Desired state of the PersistentStorageInstance + enum: + - Enabled + - Disabled + type: string + userID: + description: User ID of the user that created the persistent storage + format: int32 + type: integer + required: + - dwDirective + - fsType + - name + - state + - userID + type: object + status: + description: PersistentStorageInstanceStatus defines the observed state + of PersistentStorageInstance + properties: + error: + description: Error information + properties: + debugMessage: + description: Internal debug message for the error + type: string + severity: + description: |- + Indication of how severe the error is. Minor will likely succeed, Major may + succeed, and Fatal will never succeed. 
+ enum: + - Minor + - Major + - Fatal + type: string + type: + description: Internal or user error + enum: + - Internal + - User + - WLM + type: string + userMessage: + description: Optional user facing message if the error is relevant + to an end user + type: string + required: + - debugMessage + - severity + - type + type: object + ready: + type: boolean + servers: + description: Servers refers to the Servers resource that provides + the backing storage for this storage instance + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + state: + description: Current state of the PersistentStorageInstance + enum: + - Enabled + - Disabled + type: string + required: + - ready + - state + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_servers.yaml b/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_servers.yaml index 4b0244273..8f7b09042 100644 --- a/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_servers.yaml +++ b/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_servers.yaml @@ -289,6 +289,162 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: True if allocation sets have been generated + jsonPath: .status.ready + name: READY + type: boolean + - jsonPath: .status.error.severity + name: ERROR + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha3 + schema: + openAPIV3Schema: + description: Servers is the Schema for the servers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ServersSpec defines the desired state of Servers + properties: + allocationSets: + items: + description: |- + ServersSpecAllocationSet is a set of allocations that all share the same allocation + size and allocation type (e.g., XFS) + properties: + allocationSize: + description: Allocation size in bytes + format: int64 + minimum: 1 + type: integer + label: + description: Label as specified in the DirectiveBreakdown + type: string + storage: + description: List of storage resources where allocations are + created + items: + description: |- + ServersSpecStorage specifies info required to identify the storage to + use, and the number of allocations to make on that storage. + ServersSpecAllocationSet.AllocationSize specifies the size of each allocation. + properties: + allocationCount: + description: The number of allocations to create of the + size in bytes specified in ServersSpecAllocationSet + minimum: 1 + type: integer + name: + description: The name of the storage + type: string + required: + - allocationCount + - name + type: object + type: array + required: + - allocationSize + - label + - storage + type: object + type: array + type: object + status: + description: |- + ServersStatus specifies whether the Servers has achieved the + ready condition along with the allocationSets that are managed + by the Servers resource. 
+ properties: + allocationSets: + items: + description: ServersStatusAllocationSet is the status of a set of + allocations + properties: + label: + description: Label as specified in the DirectiveBreakdown + type: string + storage: + additionalProperties: + description: ServersStatusStorage is the status of the allocations + on a storage + properties: + allocationSize: + description: Allocation size in bytes + format: int64 + type: integer + required: + - allocationSize + type: object + description: List of storage resources that have allocations + type: object + required: + - label + - storage + type: object + type: array + error: + description: Error information + properties: + debugMessage: + description: Internal debug message for the error + type: string + severity: + description: |- + Indication of how severe the error is. Minor will likely succeed, Major may + succeed, and Fatal will never succeed. + enum: + - Minor + - Major + - Fatal + type: string + type: + description: Internal or user error + enum: + - Internal + - User + - WLM + type: string + userMessage: + description: Optional user facing message if the error is relevant + to an end user + type: string + required: + - debugMessage + - severity + - type + type: object + lastUpdate: + format: date-time + type: string + ready: + type: boolean + required: + - ready + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_storages.yaml b/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_storages.yaml index ab4454645..b6c56b8e2 100644 --- a/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_storages.yaml +++ b/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_storages.yaml @@ -421,6 +421,220 @@ spec: - spec type: object served: true + storage: false + 
subresources: + status: {} + - additionalPrinterColumns: + - description: State of the storage resource + jsonPath: .spec.state + name: State + type: string + - description: Status of the storage resource + jsonPath: .status.status + name: Status + type: string + - description: State of live updates + jsonPath: .spec.mode + name: Mode + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha3 + schema: + openAPIV3Schema: + description: Storage is the Schema for the storages API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: StorageSpec defines the desired specifications of Storage + resource + properties: + mode: + default: Live + description: |- + Mode indicates whether the resource is live and is being updated + by the reconcilers or whether it is in testing mode and is being + updated manually. + enum: + - Live + - Testing + type: string + state: + default: Enabled + description: State describes the desired state of the Storage resource. 
+ enum: + - Enabled + - Disabled + type: string + type: object + status: + description: StorageData contains the data about the storage + properties: + access: + description: Access contains the information about where the storage + is accessible + properties: + computes: + description: Computes is the list of compute nodes that have access + to the storage + items: + description: Node provides the status of either a compute or + a server + properties: + name: + description: Name is the Kubernetes name of the node + type: string + status: + description: Status of the node + enum: + - Starting + - Ready + - Disabled + - NotPresent + - Offline + - Failed + - Degraded + - Drained + - Unknown + type: string + type: object + type: array + protocol: + description: Protocol is the method that this storage can be accessed + enum: + - PCIe + type: string + servers: + description: Servers is the list of non-compute nodes that have + access to the storage + items: + description: Node provides the status of either a compute or + a server + properties: + name: + description: Name is the Kubernetes name of the node + type: string + status: + description: Status of the node + enum: + - Starting + - Ready + - Disabled + - NotPresent + - Offline + - Failed + - Degraded + - Drained + - Unknown + type: string + type: object + type: array + type: object + capacity: + default: 0 + description: |- + Capacity is the number of bytes this storage provides. This is the + total accessible bytes as determined by the driver and may be different + than the sum of the devices' capacities. + format: int64 + type: integer + devices: + description: Devices is the list of physical devices that make up + this storage + items: + description: StorageDevice contains the details of the storage hardware + properties: + capacity: + description: |- + Capacity in bytes of the device. The full capacity may not + be usable depending on what the storage driver can provide. 
+ format: int64 + type: integer + firmwareVersion: + description: The firmware version of this storage controller. + type: string + model: + description: Model is the manufacturer information about the + device + type: string + serialNumber: + description: The serial number for this storage controller. + type: string + slot: + description: Physical slot location of the storage controller. + type: string + status: + description: Status of the individual device + enum: + - Starting + - Ready + - Disabled + - NotPresent + - Offline + - Failed + - Degraded + - Drained + - Unknown + type: string + wearLevel: + description: |- + WearLevel in percent for SSDs. A value of 100 indicates the estimated endurance of the non-volatile memory + has been consumed, but may not indicate a storage failure. + format: int64 + type: integer + type: object + type: array + message: + description: Message provides additional details on the current status + of the resource + type: string + rebootRequired: + description: |- + Reboot Required is true if the node requires a reboot and false otherwise. A reboot may be + necessary to recover from certain hardware failures or high-availability clustering events. 
+ type: boolean + status: + description: Status is the overall status of the storage + enum: + - Starting + - Ready + - Disabled + - NotPresent + - Offline + - Failed + - Degraded + - Drained + - Unknown + type: string + type: + description: Type describes what type of storage this is + enum: + - NVMe + type: string + required: + - capacity + type: object + required: + - spec + type: object + served: true storage: true subresources: status: {} diff --git a/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_systemconfigurations.yaml b/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_systemconfigurations.yaml index df6d0f59e..1e0d6b223 100644 --- a/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_systemconfigurations.yaml +++ b/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_systemconfigurations.yaml @@ -280,6 +280,162 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: True if SystemConfiguration is reconciled + jsonPath: .status.ready + name: READY + type: boolean + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha3 + schema: + openAPIV3Schema: + description: SystemConfiguration is the Schema for the systemconfigurations + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. 
+ In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + SystemConfigurationSpec describes the node layout of the system. This is filled in by + an administrator at software installation time. + properties: + externalComputeNodes: + description: |- + ExternalComputeNodes is the list of compute nodes that are not + directly matched with any of the StorageNodes. + items: + description: |- + SystemConfigurationExternalComputeNode describes a compute node that is + not directly matched with any of the nodes in the StorageNodes list. + properties: + name: + description: Name of the compute node + type: string + required: + - name + type: object + type: array + ports: + description: |- + Ports is the list of ports available for communication between nodes in the system. + Valid values are single integers, or a range of values of the form "START-END" where + START is an integer value that represents the start of a port range and END is an + integer value that represents the end of the port range (inclusive). + items: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: array + portsCooldownInSeconds: + default: 60 + description: |- + PortsCooldownInSeconds is the number of seconds to wait before a port can be reused. Defaults + to 60 seconds (to match the typical value for the kernel's TIME_WAIT). A value of 0 means the + ports can be reused immediately. + type: integer + storageNodes: + description: StorageNodes is the list of storage nodes on the system + items: + description: SystemConfigurationStorageNode describes a storage + node in the system + properties: + computesAccess: + description: ComputesAccess is the list of compute nodes that + can use the server + items: + description: |- + SystemConfigurationComputeNodeReference describes a compute node that + has access to a server. 
+ properties: + index: + description: Index of the compute node from the server + type: integer + name: + description: Name of the compute node + type: string + required: + - index + - name + type: object + type: array + name: + description: Name of the server node + type: string + type: + description: Type is the type of server + type: string + required: + - name + - type + type: object + type: array + required: + - portsCooldownInSeconds + type: object + status: + description: SystemConfigurationStatus defines the status of SystemConfiguration + properties: + error: + description: Error information + properties: + debugMessage: + description: Internal debug message for the error + type: string + severity: + description: |- + Indication of how severe the error is. Minor will likely succeed, Major may + succeed, and Fatal will never succeed. + enum: + - Minor + - Major + - Fatal + type: string + type: + description: Internal or user error + enum: + - Internal + - User + - WLM + type: string + userMessage: + description: Optional user facing message if the error is relevant + to an end user + type: string + required: + - debugMessage + - severity + - type + type: object + ready: + description: Ready indicates when the SystemConfiguration has been + reconciled + type: boolean + required: + - ready + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_workflows.yaml b/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_workflows.yaml index 4ef5cd430..ce4b53b06 100644 --- a/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_workflows.yaml +++ b/vendor/github.com/DataWorkflowServices/dws/config/crd/bases/dataworkflowservices.github.io_workflows.yaml @@ -729,5 +729,376 @@ spec: type: object type: object served: true + storage: false + subresources: {} + - 
additionalPrinterColumns: + - description: Current state + jsonPath: .status.state + name: STATE + type: string + - description: True if current state is achieved + jsonPath: .status.ready + name: READY + type: boolean + - description: Indicates achievement of current state + jsonPath: .status.status + name: STATUS + type: string + - description: Job ID + jsonPath: .spec.jobID + name: JOBID + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + - description: Desired state + jsonPath: .spec.desiredState + name: DESIREDSTATE + priority: 1 + type: string + - description: Time of most recent desiredState change + jsonPath: .status.desiredStateChange + name: DESIREDSTATECHANGE + priority: 1 + type: date + - description: Duration of last state change + jsonPath: .status.elapsedTimeLastState + name: ELAPSEDTIMELASTSTATE + priority: 1 + type: string + - description: UID + jsonPath: .spec.userID + name: UID + priority: 1 + type: string + - description: GID + jsonPath: .spec.groupID + name: GID + priority: 1 + type: string + name: v1alpha3 + schema: + openAPIV3Schema: + description: Workflow is the Schema for the workflows API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WorkflowSpec defines the desired state of Workflow + properties: + desiredState: + description: |- + Desired state for the workflow to be in. Unless progressing to the teardown state, + this can only be set to the next state when the current desired state has been achieved. + enum: + - Proposal + - Setup + - DataIn + - PreRun + - PostRun + - DataOut + - Teardown + type: string + dwDirectives: + description: 'List of #DW strings from a WLM job script' + items: + type: string + type: array + groupID: + description: |- + GroupID specifies the group ID for the workflow. The Group ID is used by the various states + in the workflow to ensure the group has permissions to perform certain actions. Used in + conjunction with User ID to run subtasks with UserID:GroupID credentials. + format: int32 + type: integer + hurry: + default: false + description: |- + Hurry indicates that the workflow's driver should kill the job in a hurry when this workflow enters its teardown state. + The driver must release all resources and unmount any filesystems that were mounted as part of the workflow, though some drivers would have done this anyway as part of their teardown state. + The driver must also kill any in-progress data transfers, or skip any data transfers that have not yet begun. + type: boolean + jobID: + anyOf: + - type: integer + - type: string + description: |- + JobID is the WLM job ID that corresponds to this workflow, and is + set by the WLM when it creates the workflow resource. + x-kubernetes-int-or-string: true + userID: + description: |- + UserID specifies the user ID for the workflow. The User ID is used by the various states + in the workflow to ensure the user has permissions to perform certain actions. 
Used in + conjunction with Group ID to run subtasks with UserID:GroupID credentials + format: int32 + type: integer + wlmID: + description: |- + WLMID identifies the Workflow Manager (WLM), and is set by the WLM + when it creates the workflow resource. + type: string + required: + - desiredState + - dwDirectives + - groupID + - jobID + - userID + - wlmID + type: object + status: + description: WorkflowStatus defines the observed state of the Workflow + properties: + computes: + description: Reference to Computes + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + desiredStateChange: + description: Time of the most recent desiredState change + format: date-time + type: string + directiveBreakdowns: + description: |- + List of #DW directive breakdowns indicating to WLM what to allocate on what Server + 1 DirectiveBreakdown per #DW Directive that requires storage + items: + description: |- + ObjectReference contains enough information to let you inspect or modify the referred object. + --- + New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. + 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. + 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular + restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". + Those cannot be well described when embedded. + 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. + 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity + during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple + and the version of the actual struct is irrelevant. + 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type + will affect numerous schemas. 
Don't make new APIs embed an underspecified API type they do not control. + + + Instead of using this type, create a locally provided and used type that is well-focused on your reference. + For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + type: array + drivers: + description: List of registered drivers and related status. Updated + by drivers. + items: + description: WorkflowDriverStatus defines the status information + provided by integration drivers. + properties: + completeTime: + description: CompleteTime reflects the time that the workflow + reconciler marks the driver complete + format: date-time + type: string + completed: + type: boolean + driverID: + type: string + dwdIndex: + type: integer + error: + description: |- + Driver error string. This is not rolled up into the workflow's + overall status section + type: string + lastHB: + format: int64 + type: integer + message: + description: Message provides additional details on the current + status of the resource + type: string + status: + description: |- + User readable reason. + For the CDS driver, this could be the state of the underlying + data movement request + enum: + - Pending + - Queued + - Running + - Completed + - TransientCondition + - Error + - DriverWait + type: string + taskID: + type: string + watchState: + description: WorkflowState is the enumeration of the state of + the workflow + enum: + - Proposal + - Setup + - DataIn + - PreRun + - PostRun + - DataOut + - Teardown + type: string + required: + - completed + - driverID + - dwdIndex + - lastHB + - taskID + - watchState + type: object + type: array + elapsedTimeLastState: + description: Duration of the last state change + type: string + env: + additionalProperties: + type: string + description: "Set of DW environment variable settings for WLM to apply + to the job.\n\t\t- DW_JOB_STRIPED\n\t\t- DW_JOB_PRIVATE\n\t\t- DW_JOB_STRIPED_CACHE\n\t\t- + DW_JOB_LDBAL_CACHE\n\t\t- DW_PERSISTENT_STRIPED_{resname}" + type: object + message: + description: Message provides additional details on the current status + of the resource + 
type: string + ready: + description: |- + Ready can be 'True', 'False' + Indicates whether State has been reached. + type: boolean + readyChange: + description: Time of the most recent desiredState's achieving Ready + status + format: date-time + type: string + state: + description: |- + The state the resource is currently transitioning to. + Updated by the controller once started. + enum: + - Proposal + - Setup + - DataIn + - PreRun + - PostRun + - DataOut + - Teardown + type: string + status: + description: |- + User readable reason and status message. + - Completed: The workflow has reached the state in workflow.Status.State. + - DriverWait: The underlying drivers are currently running. + - TransientCondition: A driver has encountered an error that might be recoverable. + - Error: A driver has encountered an error that will not recover. + enum: + - Completed + - DriverWait + - TransientCondition + - Error + type: string + required: + - ready + type: object + type: object + served: true storage: true subresources: {} diff --git a/vendor/github.com/DataWorkflowServices/dws/config/webhook/manifests.yaml b/vendor/github.com/DataWorkflowServices/dws/config/webhook/manifests.yaml index 794e36902..47da19969 100644 --- a/vendor/github.com/DataWorkflowServices/dws/config/webhook/manifests.yaml +++ b/vendor/github.com/DataWorkflowServices/dws/config/webhook/manifests.yaml @@ -11,14 +11,14 @@ webhooks: service: name: webhook-service namespace: system - path: /mutate-dataworkflowservices-github-io-v1alpha2-workflow + path: /mutate-dataworkflowservices-github-io-v1alpha3-workflow failurePolicy: Fail name: mworkflow.kb.io rules: - apiGroups: - dataworkflowservices.github.io apiVersions: - - v1alpha2 + - v1alpha3 operations: - CREATE resources: @@ -37,14 +37,14 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-dataworkflowservices-github-io-v1alpha2-workflow + path: /validate-dataworkflowservices-github-io-v1alpha3-workflow failurePolicy: Fail 
name: vworkflow.kb.io rules: - apiGroups: - dataworkflowservices.github.io apiVersions: - - v1alpha2 + - v1alpha3 operations: - CREATE - UPDATE diff --git a/vendor/github.com/DataWorkflowServices/dws/controllers/clientmount_controller.go b/vendor/github.com/DataWorkflowServices/dws/controllers/clientmount_controller.go index 835bd9f41..9f29b133a 100644 --- a/vendor/github.com/DataWorkflowServices/dws/controllers/clientmount_controller.go +++ b/vendor/github.com/DataWorkflowServices/dws/controllers/clientmount_controller.go @@ -1,5 +1,5 @@ /* - * Copyright 2021-2023 Hewlett Packard Enterprise Development LP + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, @@ -31,7 +31,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/predicate" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" "github.com/DataWorkflowServices/dws/utils/updater" ) @@ -54,7 +54,7 @@ const ( // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. 
func (r *ClientMountReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, err error) { - clientMount := &dwsv1alpha2.ClientMount{} + clientMount := &dwsv1alpha3.ClientMount{} if err := r.Get(ctx, req.NamespacedName, clientMount); err != nil { // ignore not-found errors, since they can't be fixed by an immediate // requeue (we'll need to wait for a new notification), and we can get them @@ -64,7 +64,7 @@ func (r *ClientMountReconciler) Reconcile(ctx context.Context, req ctrl.Request) // Create a status updater that handles the call to r.Status().Update() if any of the fields // in clientMount.Status{} change - statusUpdater := updater.NewStatusUpdater[*dwsv1alpha2.ClientMountStatus](clientMount) + statusUpdater := updater.NewStatusUpdater[*dwsv1alpha3.ClientMountStatus](clientMount) defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() defer func() { clientMount.Status.SetResourceError(err) }() @@ -84,7 +84,7 @@ func (r *ClientMountReconciler) Reconcile(ctx context.Context, req ctrl.Request) // Create the status section if it doesn't exist yet if len(clientMount.Status.Mounts) != len(clientMount.Spec.Mounts) { - clientMount.Status.Mounts = make([]dwsv1alpha2.ClientMountInfoStatus, len(clientMount.Spec.Mounts)) + clientMount.Status.Mounts = make([]dwsv1alpha3.ClientMountInfoStatus, len(clientMount.Spec.Mounts)) } // Initialize the status section if the desired state doesn't match the status state @@ -126,7 +126,7 @@ func filterByComputeNamespacePrefix() predicate.Predicate { // SetupWithManager sets up the controller with the Manager. func (r *ClientMountReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&dwsv1alpha2.ClientMount{}). + For(&dwsv1alpha3.ClientMount{}). WithEventFilter(filterByComputeNamespacePrefix()). 
Complete(r) } diff --git a/vendor/github.com/DataWorkflowServices/dws/controllers/systemconfiguration_controller.go b/vendor/github.com/DataWorkflowServices/dws/controllers/systemconfiguration_controller.go index 177fd73f5..c15944ba7 100644 --- a/vendor/github.com/DataWorkflowServices/dws/controllers/systemconfiguration_controller.go +++ b/vendor/github.com/DataWorkflowServices/dws/controllers/systemconfiguration_controller.go @@ -26,7 +26,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" "github.com/DataWorkflowServices/dws/internal/controller/metrics" "github.com/DataWorkflowServices/dws/utils/updater" "github.com/go-logr/logr" @@ -48,7 +48,7 @@ type SystemConfigurationReconciler struct { client.Client Log logr.Logger Scheme *runtime.Scheme - ChildObjects []dwsv1alpha2.ObjectList + ChildObjects []dwsv1alpha3.ObjectList } // +kubebuilder:rbac:groups=dataworkflowservices.github.io,resources=systemconfigurations,verbs=get;list;watch;create;update;patch;delete @@ -61,7 +61,7 @@ func (r *SystemConfigurationReconciler) Reconcile(ctx context.Context, req ctrl. metrics.DwsReconcilesTotal.Inc() - systemConfiguration := &dwsv1alpha2.SystemConfiguration{} + systemConfiguration := &dwsv1alpha3.SystemConfiguration{} if err := r.Get(ctx, req.NamespacedName, systemConfiguration); err != nil { // ignore not-found errors, since they can't be fixed by an immediate // requeue (we'll need to wait for a new notification), and we can get them @@ -71,7 +71,7 @@ func (r *SystemConfigurationReconciler) Reconcile(ctx context.Context, req ctrl. 
// Create a status updater that handles the call to r.Status().Update() if any of the fields // in systemConfiguration.Status{} change - statusUpdater := updater.NewStatusUpdater[*dwsv1alpha2.SystemConfigurationStatus](systemConfiguration) + statusUpdater := updater.NewStatusUpdater[*dwsv1alpha3.SystemConfigurationStatus](systemConfiguration) defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() defer func() { systemConfiguration.Status.SetResourceErrorAndLog(err, log) }() @@ -81,7 +81,7 @@ func (r *SystemConfigurationReconciler) Reconcile(ctx context.Context, req ctrl. return ctrl.Result{}, nil } - deleteStatus, err := dwsv1alpha2.DeleteChildren(ctx, r.Client, r.ChildObjects, systemConfiguration) + deleteStatus, err := dwsv1alpha3.DeleteChildren(ctx, r.Client, r.ChildObjects, systemConfiguration) if err != nil { return ctrl.Result{}, err } @@ -118,7 +118,7 @@ func (r *SystemConfigurationReconciler) Reconcile(ctx context.Context, req ctrl. for _, storageNode := range systemConfiguration.Spec.StorageNodes { // Create a storage resource for each storage node listed in the system configuration - storage := &dwsv1alpha2.Storage{ + storage := &dwsv1alpha3.Storage{ ObjectMeta: metav1.ObjectMeta{ Name: storageNode.Name, Namespace: corev1.NamespaceDefault, @@ -127,16 +127,16 @@ func (r *SystemConfigurationReconciler) Reconcile(ctx context.Context, req ctrl. 
result, err := ctrl.CreateOrUpdate(ctx, r.Client, storage, func() error { - dwsv1alpha2.AddOwnerLabels(storage, systemConfiguration) + dwsv1alpha3.AddOwnerLabels(storage, systemConfiguration) labels := storage.GetLabels() - labels[dwsv1alpha2.StorageTypeLabel] = storageNode.Type + labels[dwsv1alpha3.StorageTypeLabel] = storageNode.Type storage.SetLabels(labels) return ctrl.SetControllerReference(systemConfiguration, storage, r.Scheme) }) if err != nil { - return ctrl.Result{}, dwsv1alpha2.NewResourceError("CreateOrUpdate failed for storage: %v", client.ObjectKeyFromObject(storage)).WithError(err) + return ctrl.Result{}, dwsv1alpha3.NewResourceError("CreateOrUpdate failed for storage: %v", client.ObjectKeyFromObject(storage)).WithError(err) } if result == controllerutil.OperationResultCreated { @@ -153,12 +153,12 @@ func (r *SystemConfigurationReconciler) Reconcile(ctx context.Context, req ctrl. // SetupWithManager sets up the controller with the Manager. func (r *SystemConfigurationReconciler) SetupWithManager(mgr ctrl.Manager) error { - r.ChildObjects = []dwsv1alpha2.ObjectList{ - &dwsv1alpha2.StorageList{}, + r.ChildObjects = []dwsv1alpha3.ObjectList{ + &dwsv1alpha3.StorageList{}, } return ctrl.NewControllerManagedBy(mgr). - For(&dwsv1alpha2.SystemConfiguration{}). - Owns(&dwsv1alpha2.Storage{}). + For(&dwsv1alpha3.SystemConfiguration{}). + Owns(&dwsv1alpha3.Storage{}). Complete(r) } diff --git a/vendor/github.com/DataWorkflowServices/dws/controllers/workflow_controller.go b/vendor/github.com/DataWorkflowServices/dws/controllers/workflow_controller.go index ba08ea1dc..db8565ad8 100644 --- a/vendor/github.com/DataWorkflowServices/dws/controllers/workflow_controller.go +++ b/vendor/github.com/DataWorkflowServices/dws/controllers/workflow_controller.go @@ -1,5 +1,5 @@ /* - * Copyright 2021-2023 Hewlett Packard Enterprise Development LP + * Copyright 2021-2024 Hewlett Packard Enterprise Development LP * Other additional copyright holders may be indicated within. 
* * The entirety of this work is licensed under the Apache License, @@ -37,7 +37,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + dwsv1alpha3 "github.com/DataWorkflowServices/dws/api/v1alpha3" "github.com/DataWorkflowServices/dws/internal/controller/metrics" "github.com/DataWorkflowServices/dws/utils/updater" ) @@ -58,7 +58,7 @@ type WorkflowReconciler struct { client.Client Scheme *kruntime.Scheme Log logr.Logger - ChildObjects []dwsv1alpha2.ObjectList + ChildObjects []dwsv1alpha3.ObjectList } //+kubebuilder:rbac:groups=dataworkflowservices.github.io,resources=workflows,verbs=get;list;watch;update;patch @@ -81,7 +81,7 @@ func (r *WorkflowReconciler) Reconcile(ctx context.Context, req ctrl.Request) (r metrics.DwsReconcilesTotal.Inc() // Fetch the Workflow workflow - workflow := &dwsv1alpha2.Workflow{} + workflow := &dwsv1alpha3.Workflow{} if err := r.Get(ctx, req.NamespacedName, workflow); err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } @@ -89,7 +89,7 @@ func (r *WorkflowReconciler) Reconcile(ctx context.Context, req ctrl.Request) (r // Create a status updater that handles the call to r.Update() if any of the fields // in workflow.Status{} change. This is necessary since Status is not a subresource // of the workflow. 
- statusUpdater := updater.NewStatusUpdater[*dwsv1alpha2.WorkflowStatus](workflow) + statusUpdater := updater.NewStatusUpdater[*dwsv1alpha3.WorkflowStatus](workflow) defer func() { err = statusUpdater.CloseWithUpdate(ctx, r.Client, err) }() // Check if the object is being deleted @@ -104,7 +104,7 @@ func (r *WorkflowReconciler) Reconcile(ctx context.Context, req ctrl.Request) (r } // Delete all the Computes resources owned by the workflow - DeleteStatus, err := dwsv1alpha2.DeleteChildren(ctx, r.Client, r.ChildObjects, workflow) + DeleteStatus, err := dwsv1alpha3.DeleteChildren(ctx, r.Client, r.ChildObjects, workflow) if err != nil { return ctrl.Result{}, err } @@ -136,7 +136,7 @@ func (r *WorkflowReconciler) Reconcile(ctx context.Context, req ctrl.Request) (r log.Info("Workflow state transitioning", "state", workflow.Spec.DesiredState) workflow.Status.State = workflow.Spec.DesiredState workflow.Status.Ready = ConditionFalse - workflow.Status.Status = dwsv1alpha2.StatusDriverWait + workflow.Status.Status = dwsv1alpha3.StatusDriverWait workflow.Status.Message = "" ts := metav1.NowMicro() workflow.Status.DesiredStateChange = &ts @@ -145,7 +145,7 @@ func (r *WorkflowReconciler) Reconcile(ctx context.Context, req ctrl.Request) (r } // We must create Computes during proposal state - if workflow.Spec.DesiredState == dwsv1alpha2.StateProposal { + if workflow.Spec.DesiredState == dwsv1alpha3.StateProposal { computes, err := r.createComputes(ctx, workflow, workflow.Name, log) if err != nil { return ctrl.Result{}, err @@ -153,7 +153,7 @@ func (r *WorkflowReconciler) Reconcile(ctx context.Context, req ctrl.Request) (r // Ensure the computes reference is set cref := v1.ObjectReference{ - Kind: reflect.TypeOf(dwsv1alpha2.Computes{}).Name(), + Kind: reflect.TypeOf(dwsv1alpha3.Computes{}).Name(), Name: computes.Name, Namespace: computes.Namespace, } @@ -180,11 +180,11 @@ func (r *WorkflowReconciler) Reconcile(ctx context.Context, req ctrl.Request) (r } workflow.Status.Ready = true 
- workflow.Status.Status = dwsv1alpha2.StatusCompleted + workflow.Status.Status = dwsv1alpha3.StatusCompleted workflow.Status.Message = "" // Loop through the driver status array find the entries that are for the current state - drivers := []dwsv1alpha2.WorkflowDriverStatus{} + drivers := []dwsv1alpha3.WorkflowDriverStatus{} for _, driver := range workflow.Status.Drivers { if driver.WatchState != workflow.Status.State { @@ -219,10 +219,10 @@ func (r *WorkflowReconciler) Reconcile(ctx context.Context, req ctrl.Request) (r workflow.Status.Message = fmt.Sprintf("DW Directive %d: %s", driver.DWDIndex, driver.Message) } - if driver.Status == dwsv1alpha2.StatusTransientCondition || driver.Status == dwsv1alpha2.StatusError || driver.Status == dwsv1alpha2.StatusCompleted { + if driver.Status == dwsv1alpha3.StatusTransientCondition || driver.Status == dwsv1alpha3.StatusError || driver.Status == dwsv1alpha3.StatusCompleted { workflow.Status.Status = driver.Status } else { - workflow.Status.Status = dwsv1alpha2.StatusDriverWait + workflow.Status.Status = dwsv1alpha3.StatusDriverWait } } } @@ -237,9 +237,9 @@ func (r *WorkflowReconciler) Reconcile(ctx context.Context, req ctrl.Request) (r return ctrl.Result{}, nil } -func (r *WorkflowReconciler) createComputes(ctx context.Context, wf *dwsv1alpha2.Workflow, name string, log logr.Logger) (*dwsv1alpha2.Computes, error) { +func (r *WorkflowReconciler) createComputes(ctx context.Context, wf *dwsv1alpha3.Workflow, name string, log logr.Logger) (*dwsv1alpha3.Computes, error) { - computes := &dwsv1alpha2.Computes{ + computes := &dwsv1alpha3.Computes{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: wf.Namespace, @@ -248,8 +248,8 @@ func (r *WorkflowReconciler) createComputes(ctx context.Context, wf *dwsv1alpha2 result, err := ctrl.CreateOrUpdate(ctx, r.Client, computes, func() error { - dwsv1alpha2.AddWorkflowLabels(computes, wf) - dwsv1alpha2.AddOwnerLabels(computes, wf) + dwsv1alpha3.AddWorkflowLabels(computes, wf) + 
dwsv1alpha3.AddOwnerLabels(computes, wf) // Link the Computes to the workflow return ctrl.SetControllerReference(wf, computes, r.Scheme) @@ -274,19 +274,19 @@ func (r *WorkflowReconciler) createComputes(ctx context.Context, wf *dwsv1alpha2 // the lowest priority and completed entries have the lowest priority. func statusPriority(status string) int { switch status { - case dwsv1alpha2.StatusCompleted: + case dwsv1alpha3.StatusCompleted: return 1 - case dwsv1alpha2.StatusDriverWait: + case dwsv1alpha3.StatusDriverWait: fallthrough - case dwsv1alpha2.StatusPending: + case dwsv1alpha3.StatusPending: fallthrough - case dwsv1alpha2.StatusQueued: + case dwsv1alpha3.StatusQueued: fallthrough - case dwsv1alpha2.StatusRunning: + case dwsv1alpha3.StatusRunning: return 2 - case dwsv1alpha2.StatusTransientCondition: + case dwsv1alpha3.StatusTransientCondition: return 3 - case dwsv1alpha2.StatusError: + case dwsv1alpha3.StatusError: return 4 } @@ -294,11 +294,11 @@ func statusPriority(status string) int { } type workflowStatusUpdater struct { - workflow *dwsv1alpha2.Workflow - existingStatus dwsv1alpha2.WorkflowStatus + workflow *dwsv1alpha3.Workflow + existingStatus dwsv1alpha3.WorkflowStatus } -func newWorkflowStatusUpdater(w *dwsv1alpha2.Workflow) *workflowStatusUpdater { +func newWorkflowStatusUpdater(w *dwsv1alpha3.Workflow) *workflowStatusUpdater { return &workflowStatusUpdater{ workflow: w, existingStatus: (*w.DeepCopy()).Status, @@ -318,14 +318,14 @@ func (w *workflowStatusUpdater) close(ctx context.Context, r *WorkflowReconciler // SetupWithManager sets up the controller with the Manager. func (r *WorkflowReconciler) SetupWithManager(mgr ctrl.Manager) error { - r.ChildObjects = []dwsv1alpha2.ObjectList{ - &dwsv1alpha2.ComputesList{}, + r.ChildObjects = []dwsv1alpha3.ObjectList{ + &dwsv1alpha3.ComputesList{}, } maxReconciles := runtime.GOMAXPROCS(0) return ctrl.NewControllerManagedBy(mgr). WithOptions(controller.Options{MaxConcurrentReconciles: maxReconciles}). 
- For(&dwsv1alpha2.Workflow{}). - Owns(&dwsv1alpha2.Computes{}). + For(&dwsv1alpha3.Workflow{}). + Owns(&dwsv1alpha3.Computes{}). Complete(r) } diff --git a/vendor/github.com/DataWorkflowServices/dws/github/cluster-api/util/conversion/conversion.go b/vendor/github.com/DataWorkflowServices/dws/github/cluster-api/util/conversion/conversion.go new file mode 100644 index 000000000..4d9970e32 --- /dev/null +++ b/vendor/github.com/DataWorkflowServices/dws/github/cluster-api/util/conversion/conversion.go @@ -0,0 +1,193 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package conversion implements conversion utilities. +package conversion + +import ( + "math/rand" + "testing" + + "github.com/google/go-cmp/cmp" + fuzz "github.com/google/gofuzz" + "github.com/onsi/gomega" + + "k8s.io/apimachinery/pkg/api/apitesting/fuzzer" + apiequality "k8s.io/apimachinery/pkg/api/equality" + metafuzzer "k8s.io/apimachinery/pkg/apis/meta/fuzzer" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/client-go/kubernetes/scheme" + + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +const ( + // DataAnnotation is the annotation that conversion webhooks + // use to retain the data in case of down-conversion from the hub. 
+ DataAnnotation = "dataworkflowservices.github.io/conversion-data" +) + +// MarshalData stores the source object as json data in the destination object annotations map. +// It ignores the metadata of the source object. +func MarshalData(src metav1.Object, dst metav1.Object) error { + u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(src) + if err != nil { + return err + } + delete(u, "metadata") + + data, err := json.Marshal(u) + if err != nil { + return err + } + annotations := dst.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + annotations[DataAnnotation] = string(data) + dst.SetAnnotations(annotations) + return nil +} + +// UnmarshalData tries to retrieve the data from the annotation and unmarshals it into the object passed as input. +func UnmarshalData(from metav1.Object, to interface{}) (bool, error) { + annotations := from.GetAnnotations() + data, ok := annotations[DataAnnotation] + if !ok { + return false, nil + } + if err := json.Unmarshal([]byte(data), to); err != nil { + return false, err + } + delete(annotations, DataAnnotation) + from.SetAnnotations(annotations) + return true, nil +} + +// GetFuzzer returns a new fuzzer to be used for testing. +func GetFuzzer(scheme *runtime.Scheme, funcs ...fuzzer.FuzzerFuncs) *fuzz.Fuzzer { + funcs = append([]fuzzer.FuzzerFuncs{ + metafuzzer.Funcs, + func(_ runtimeserializer.CodecFactory) []interface{} { + return []interface{}{ + // Custom fuzzer for metav1.Time pointers which weren't + // fuzzed and always resulted in `nil` values. + // This implementation is somewhat similar to the one provided + // in the metafuzzer.Funcs. + func(input *metav1.Time, c fuzz.Continue) { + if input != nil { + var sec, nsec uint32 + c.Fuzz(&sec) + c.Fuzz(&nsec) + fuzzed := metav1.Unix(int64(sec), int64(nsec)).Rfc3339Copy() + input.Time = fuzzed.Time + } + }, + } + }, + }, funcs...) 
+ return fuzzer.FuzzerFor( + fuzzer.MergeFuzzerFuncs(funcs...), + rand.NewSource(rand.Int63()), //nolint:gosec + runtimeserializer.NewCodecFactory(scheme), + ) +} + +// FuzzTestFuncInput contains input parameters +// for the FuzzTestFunc function. +type FuzzTestFuncInput struct { + Scheme *runtime.Scheme + + Hub conversion.Hub + HubAfterMutation func(conversion.Hub) + + Spoke conversion.Convertible + SpokeAfterMutation func(convertible conversion.Convertible) + SkipSpokeAnnotationCleanup bool + + FuzzerFuncs []fuzzer.FuzzerFuncs +} + +// FuzzTestFunc returns a new testing function to be used in tests to make sure conversions between +// the Hub version of an object and an older version aren't lossy. +func FuzzTestFunc(input FuzzTestFuncInput) func(*testing.T) { + if input.Scheme == nil { + input.Scheme = scheme.Scheme + } + + return func(t *testing.T) { + t.Helper() + t.Run("spoke-hub-spoke", func(t *testing.T) { + g := gomega.NewWithT(t) + fuzzer := GetFuzzer(input.Scheme, input.FuzzerFuncs...) + + for i := 0; i < 10000; i++ { + // Create the spoke and fuzz it + spokeBefore := input.Spoke.DeepCopyObject().(conversion.Convertible) + fuzzer.Fuzz(spokeBefore) + + // First convert spoke to hub + hubCopy := input.Hub.DeepCopyObject().(conversion.Hub) + g.Expect(spokeBefore.ConvertTo(hubCopy)).To(gomega.Succeed()) + + // Convert hub back to spoke and check if the resulting spoke is equal to the spoke before the round trip + spokeAfter := input.Spoke.DeepCopyObject().(conversion.Convertible) + g.Expect(spokeAfter.ConvertFrom(hubCopy)).To(gomega.Succeed()) + + // Remove data annotation eventually added by ConvertFrom for avoiding data loss in hub-spoke-hub round trips + // NOTE: There are use case when we want to skip this operation, e.g. if the spoke object does not have ObjectMeta (e.g. kubeadm types). 
+ if !input.SkipSpokeAnnotationCleanup { + metaAfter := spokeAfter.(metav1.Object) + delete(metaAfter.GetAnnotations(), DataAnnotation) + } + + if input.SpokeAfterMutation != nil { + input.SpokeAfterMutation(spokeAfter) + } + + g.Expect(apiequality.Semantic.DeepEqual(spokeBefore, spokeAfter)).To(gomega.BeTrue(), cmp.Diff(spokeBefore, spokeAfter)) + } + }) + t.Run("hub-spoke-hub", func(t *testing.T) { + g := gomega.NewWithT(t) + fuzzer := GetFuzzer(input.Scheme, input.FuzzerFuncs...) + + for i := 0; i < 10000; i++ { + // Create the hub and fuzz it + hubBefore := input.Hub.DeepCopyObject().(conversion.Hub) + fuzzer.Fuzz(hubBefore) + + // First convert hub to spoke + dstCopy := input.Spoke.DeepCopyObject().(conversion.Convertible) + g.Expect(dstCopy.ConvertFrom(hubBefore)).To(gomega.Succeed()) + + // Convert spoke back to hub and check if the resulting hub is equal to the hub before the round trip + hubAfter := input.Hub.DeepCopyObject().(conversion.Hub) + g.Expect(dstCopy.ConvertTo(hubAfter)).To(gomega.Succeed()) + + if input.HubAfterMutation != nil { + input.HubAfterMutation(hubAfter) + } + + g.Expect(apiequality.Semantic.DeepEqual(hubBefore, hubAfter)).To(gomega.BeTrue(), cmp.Diff(hubBefore, hubAfter)) + } + }) + } +} diff --git a/vendor/modules.txt b/vendor/modules.txt index d14d11a7a..5f07bce92 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,9 +1,11 @@ -# github.com/DataWorkflowServices/dws v0.0.1-0.20241029172011-d5898d0b8640 +# github.com/DataWorkflowServices/dws v0.0.1-0.20241107211258-04c972c3e0b0 ## explicit; go 1.21 github.com/DataWorkflowServices/dws/api/v1alpha2 +github.com/DataWorkflowServices/dws/api/v1alpha3 github.com/DataWorkflowServices/dws/config/crd/bases github.com/DataWorkflowServices/dws/config/webhook github.com/DataWorkflowServices/dws/controllers +github.com/DataWorkflowServices/dws/github/cluster-api/util/conversion github.com/DataWorkflowServices/dws/internal/controller/metrics 
github.com/DataWorkflowServices/dws/utils/dwdparse github.com/DataWorkflowServices/dws/utils/ports