diff --git a/.github/workflows/rpm_build.yml b/.github/workflows/rpm_build.yml new file mode 100644 index 000000000..ca8e64710 --- /dev/null +++ b/.github/workflows/rpm_build.yml @@ -0,0 +1,73 @@ +name: RPM Build +on: + push: + branches: + - '*' + tags: + - 'v*' + +jobs: + repo_version: + runs-on: ubuntu-latest + outputs: + version_output: ${{ steps.step1.outputs.version }} + steps: + - name: Verify context + run: | + echo "ref is ${{ github.ref }}" + echo "ref_type is ${{ github.ref_type }}" + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + ref: ${{ github.event.pull_request.head.sha }} + - name: Get Version + id: step1 + run: echo "version=$(./git-version-gen)" >> $GITHUB_OUTPUT + + rpm_build: + runs-on: ubuntu-latest + needs: repo_version + container: + image: centos:8 + env: + NODE_ENV: development + ports: + - 80 + options: --cpus 1 + steps: + - name: "Build context" + env: + VERSION_OUTPUT: ${{ needs.repo_version.outputs.version_output }} + run: | + echo "ref is ${{ github.ref }}" + echo "ref_type is ${{ github.ref_type }}" + echo "head.sha is ${{ github.event.pull_request.head.sha }}" + echo "git-version-gen is $VERSION_OUTPUT" + + - name: checkout + uses: actions/checkout@v3 + - name: environment setup + env: + VERSION_OUTPUT: ${{ needs.repo_version.outputs.version_output }} + run: | + dnf -y --disablerepo '*' --enablerepo=extras swap centos-linux-repos centos-stream-repos + dnf -y distro-sync + dnf -y makecache --refresh + dnf install -y rpm-build rpmdevtools git make + dnf module -y install go-toolset + rpmdev-setuptree + echo $VERSION_OUTPUT > .rpmversion + cat .rpmversion + tar -czf /github/home/rpmbuild/SOURCES/nnf-clientmount-1.0.tar.gz --transform 's,^,nnf-clientmount-1.0/,' . 
+ - name: build rpms + run: rpmbuild -ba clientmount.spec + - name: upload rpms + uses: actions/upload-artifact@v3 + with: + name: nnf-clientmount-1.0-1.el8.x86_64.rpm + path: /github/home/rpmbuild/RPMS/x86_64/nnf-clientmount-1.0-1.el8.x86_64.rpm + - name: upload srpms + uses: actions/upload-artifact@v3 + with: + name: nnf-clientmount-1.0-1.el8.src.rpm + path: /github/home/rpmbuild/SRPMS/nnf-clientmount-1.0-1.el8.src.rpm diff --git a/.gitignore b/.gitignore index 7fd985a2b..8d8658dc6 100644 --- a/.gitignore +++ b/.gitignore @@ -30,6 +30,7 @@ testbin/* commands.log kind-config.yaml standalone-playground +mount-daemon/clientmount nnf-sos .version config/begin/* diff --git a/Dockerfile b/Dockerfile index 659ca8d50..d52b76afc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -27,6 +27,7 @@ COPY go.sum go.sum COPY cmd/ cmd/ COPY api/ api/ COPY internal/ internal/ +COPY pkg/ pkg/ COPY vendor/ vendor/ COPY config/ config/ diff --git a/Makefile b/Makefile index a9049b267..a7dc87393 100644 --- a/Makefile +++ b/Makefile @@ -231,6 +231,10 @@ test: manifests generate fmt vet envtest ## Run tests. done ##@ Build +build-daemon: RPM_VERSION ?= $(shell ./git-version-gen) +build-daemon: PACKAGE = github.com/NearNodeFlash/nnf-sos/mount-daemon/version +build-daemon: manifests generate fmt vet ## Build standalone clientMount daemon + GOOS=linux GOARCH=amd64 go build -ldflags="-X '$(PACKAGE).version=$(RPM_VERSION)'" -o bin/clientmountd mount-daemon/main.go build: generate fmt vet ## Build manager binary. 
go build -o bin/manager cmd/main.go diff --git a/PROJECT b/PROJECT index ba2ebc443..08867fc17 100644 --- a/PROJECT +++ b/PROJECT @@ -34,6 +34,15 @@ resources: kind: NnfNodeStorage path: github.com/NearNodeFlash/nnf-sos/api/v1alpha1 version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: cray.hpe.com + group: nnf + kind: NnfNodeBlockStorage + path: github.com/NearNodeFlash/nnf-sos/api/v1alpha1 + version: v1alpha1 - controller: true domain: github.io group: dataworkflowservices diff --git a/api/v1alpha1/nnf_node_block_storage_types.go b/api/v1alpha1/nnf_node_block_storage_types.go new file mode 100644 index 000000000..2eed601bf --- /dev/null +++ b/api/v1alpha1/nnf_node_block_storage_types.go @@ -0,0 +1,127 @@ +/* + * Copyright 2021-2023 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v1alpha1 + +import ( + dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + "github.com/DataWorkflowServices/dws/utils/updater" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type NnfNodeBlockStorageAllocationSpec struct { + // Aggregate capacity of the block devices for each allocation + Capacity int64 `json:"capacity,omitempty"` + + // List of nodes where /dev devices should be created + Access []string `json:"access,omitempty"` +} + +// NnfNodeBlockStorageSpec defines the desired storage attributes on a NNF Node. +// Storage specs are created at the behest of the user and fulfilled by the NNF Node Controller. +type NnfNodeBlockStorageSpec struct { + // Allocations is the list of storage allocations to make + Allocations []NnfNodeBlockStorageAllocationSpec `json:"allocations,omitempty"` +} + +type NnfNodeBlockStorageStatus struct { + // Allocations is the list of storage allocations that were made + Allocations []NnfNodeBlockStorageAllocationStatus `json:"allocations,omitempty"` + + dwsv1alpha2.ResourceError `json:",inline"` + + Ready bool `json:"ready"` +} + +type NnfNodeBlockStorageDeviceStatus struct { + // NQN of the base NVMe device + NQN string `json:"NQN"` + + // Id of the Namespace on the NVMe device (e.g., "2") + NamespaceId string `json:"namespaceId"` + + // Total capacity allocated for the storage. This may differ from the requested storage + // capacity as the system may round up the requested capacity to satisfy underlying + // storage requirements (i.e. block size / stripe size). 
+ CapacityAllocated int64 `json:"capacityAllocated,omitempty"` +} + +type NnfNodeBlockStorageAccessStatus struct { + // /dev paths for each of the block devices + DevicePaths []string `json:"devicePaths,omitempty"` + + // Redfish ID for the storage group + StorageGroupId string `json:"storageGroupId,omitempty"` +} + +type NnfNodeBlockStorageAllocationStatus struct { + Accesses map[string]NnfNodeBlockStorageAccessStatus `json:"accesses,omitempty"` + + // List of NVMe namespaces used by this allocation + Devices []NnfNodeBlockStorageDeviceStatus `json:"devices,omitempty"` + + // Total capacity allocated for the storage. This may differ from the requested storage + // capacity as the system may round up the requested capacity to satisfy underlying + // storage requirements (i.e. block size / stripe size). + CapacityAllocated int64 `json:"capacityAllocated,omitempty"` + + // Redfish ID for the storage pool + StoragePoolId string `json:"storagePoolId,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.ready" +// +kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +type NnfNodeBlockStorage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NnfNodeBlockStorageSpec `json:"spec,omitempty"` + Status NnfNodeBlockStorageStatus `json:"status,omitempty"` +} + +func (ns *NnfNodeBlockStorage) GetStatus() updater.Status[*NnfNodeBlockStorageStatus] { + return &ns.Status +} + +// +kubebuilder:object:root=true + +// NnfNodeBlockStorageList contains a list of NNF Nodes +type NnfNodeBlockStorageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NnfNodeBlockStorage `json:"items"` +} + +func (n *NnfNodeBlockStorageList) GetObjectList() []client.Object { + 
objectList := []client.Object{} + + for i := range n.Items { + objectList = append(objectList, &n.Items[i]) + } + + return objectList +} + +func init() { + SchemeBuilder.Register(&NnfNodeBlockStorage{}, &NnfNodeBlockStorageList{}) +} diff --git a/api/v1alpha1/nnf_node_storage_types.go b/api/v1alpha1/nnf_node_storage_types.go index bc31a1fc1..733b2aa81 100644 --- a/api/v1alpha1/nnf_node_storage_types.go +++ b/api/v1alpha1/nnf_node_storage_types.go @@ -22,6 +22,7 @@ package v1alpha1 import ( dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/updater" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -43,15 +44,6 @@ type NnfNodeStorageSpec struct { // Group ID for file system GroupID uint32 `json:"groupID"` - // Set the owner and group permissions specified by UserID and GroupID. This is for - // Lustre file systems only, and should be set only after all Lustre targets are created. - // +kubebuilder:default:=false - SetOwnerGroup bool `json:"setOwnerGroup"` - - // Capacity defines the capacity, in bytes, of this storage specification. The NNF Node itself - // may split the storage among the available drives operating in the NNF Node. - Capacity int64 `json:"capacity,omitempty"` - // FileSystemType defines the type of the desired filesystem, or raw // block device. // +kubebuilder:validation:Enum=raw;lvm;zfs;xfs;gfs2;lustre @@ -62,18 +54,8 @@ type NnfNodeStorageSpec struct { // FileSystemType specifies a Lustre target. LustreStorage LustreStorageSpec `json:"lustreStorage,omitempty"` - // ClientEndpoints sets which endpoints should have access to an allocation. 
- ClientEndpoints []ClientEndpointsSpec `json:"clientEndpoints"` -} - -// ClientEndpointsSpec contains information about which nodes a storage allocation -// should be visible to -type ClientEndpointsSpec struct { - // Index of the allocation in the NnfNodeStorage - AllocationIndex int `json:"allocationIndex"` - - // List of nodes that should see the allocation - NodeNames []string `json:"nodeNames"` + // BlockReference is an object reference to an NnfNodeBlockStorage + BlockReference corev1.ObjectReference `json:"blockReference,omitempty"` } // LustreStorageSpec describes the Lustre target to be created here. @@ -83,7 +65,7 @@ type LustreStorageSpec struct { FileSystemName string `json:"fileSystemName,omitempty"` // TargetType is the type of Lustre target to be created. - // +kubebuilder:validation:Enum=MGT;MDT;MGTMDT;OST + // +kubebuilder:validation:Enum=mgt;mdt;mgtmdt;ost TargetType string `json:"targetType,omitempty"` // StartIndex is used to order a series of MDTs or OSTs. This is used only @@ -93,9 +75,9 @@ type LustreStorageSpec struct { // +kubebuilder:validation:Minimum:=0 StartIndex int `json:"startIndex,omitempty"` - // MgsNode is the NID of the MGS to use. This is used only when + // MgsAddress is the NID of the MGS to use. This is used only when // creating MDT and OST targets. - MgsNode string `json:"mgsNode,omitempty"` + MgsAddress string `json:"mgsAddress,omitempty"` // BackFs is the type of backing filesystem to use. // +kubebuilder:validation:Enum=ldiskfs;zfs @@ -107,79 +89,27 @@ type NnfNodeStorageStatus struct { // Allocations is the list of storage allocations that were made Allocations []NnfNodeStorageAllocationStatus `json:"allocations,omitempty"` - dwsv1alpha2.ResourceError `json:",inline"` - - // LustreStorageStatus describes the Lustre targets created here. 
- LustreStorage LustreStorageStatus `json:"lustreStorage,omitempty"` - - // OwnerGroupStatus is the status of the operation for setting the owner and group - // of a file system - OwnerGroupStatus NnfResourceStatusType `json:"ownerGroupStatus,omitempty"` -} - -// NnfNodeStorageNVMeStatus provides a way to uniquely identify an NVMe namespace -// in the system -type NnfNodeStorageNVMeStatus struct { - // Serial number of the base NVMe device - DeviceSerial string `json:"deviceSerial"` - - // Id of the Namespace on the NVMe device (e.g., "2") - NamespaceID string `json:"namespaceID"` + Ready bool `json:"ready,omitempty"` - // Globally unique namespace ID - NamespaceGUID string `json:"namespaceGUID"` + dwsv1alpha2.ResourceError `json:",inline"` } // NnfNodeStorageAllocationStatus defines the allocation status for each allocation in the NnfNodeStorage type NnfNodeStorageAllocationStatus struct { - // Represents the time when the storage was created by the controller - // It is represented in RFC3339 form and is in UTC. - CreationTime *metav1.Time `json:"creationTime,omitempty"` - - // Represents the time when the storage was deleted by the controller. This field - // is updated when the Storage specification State transitions to 'Delete' by the - // client. - // It is represented in RFC3339 form and is in UTC. - DeletionTime *metav1.Time `json:"deletionTime,omitempty"` - - // Total capacity allocated for the storage. This may differ from the requested storage - // capacity as the system may round up to the requested capacity satisify underlying - // storage requirements (i.e. block size / stripe size). - CapacityAllocated int64 `json:"capacityAllocated,omitempty"` - - // Represents the storage group that is supporting this server. A storage group is - // the mapping from a group of drive namespaces to an individual server. This value - // can be safely ignored by the client. 
- StorageGroup NnfResourceStatus `json:"storageGroup,omitempty"` - // Name of the LVM VG VolumeGroup string `json:"volumeGroup,omitempty"` // Name of the LVM LV LogicalVolume string `json:"logicalVolume,omitempty"` - // List of NVMe namespaces used by this allocation - NVMeList []NnfNodeStorageNVMeStatus `json:"nvmeList,omitempty"` - - // Represents the file share that is supporting this server. A file share is the - // combination of a storage group and the associated file system parameters (type, mountpoint) - // that makes up the available storage. - FileShare NnfResourceStatus `json:"fileShare,omitempty"` - - StoragePool NnfResourceStatus `json:"storagePool,omitempty"` - - FileSystem NnfResourceStatus `json:"fileSystem,omitempty"` + Ready bool `json:"ready,omitempty"` } -// LustreStorageStatus describes the Lustre target created here. -type LustreStorageStatus struct { - - // Nid (LNet Network Identifier) of this node. This is populated on MGS nodes only. - Nid string `json:"nid,omitempty"` -} - -//+kubebuilder:object:root=true - +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.ready" +// +kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" // NnfNodeStorage is the Schema for the NnfNodeStorage API type NnfNodeStorage struct { metav1.TypeMeta `json:",inline"` @@ -194,8 +124,6 @@ func (ns *NnfNodeStorage) GetStatus() updater.Status[*NnfNodeStorageStatus] { } //+kubebuilder:object:root=true -//+kubebuilder:printcolumn:name="ERROR",type="string",JSONPath=".status.error.severity" -//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" // NnfNodeStorageList contains a list of NNF Nodes type NnfNodeStorageList struct { diff --git a/api/v1alpha1/nnf_node_types.go b/api/v1alpha1/nnf_node_types.go index 
c7e8434c5..d79295213 100644 --- a/api/v1alpha1/nnf_node_types.go +++ b/api/v1alpha1/nnf_node_types.go @@ -55,6 +55,9 @@ type NnfNodeStatus struct { // Fenced is true when the NNF Node is fenced by the STONITH agent, and false otherwise. Fenced bool `json:"fenced,omitempty"` + // LNetNid is the LNet address for the NNF node + LNetNid string `json:"lnetNid,omitempty"` + Capacity int64 `json:"capacity,omitempty"` CapacityAllocated int64 `json:"capacityAllocated,omitempty"` diff --git a/api/v1alpha1/nnf_storage_types.go b/api/v1alpha1/nnf_storage_types.go index 633451600..0468be73c 100644 --- a/api/v1alpha1/nnf_storage_types.go +++ b/api/v1alpha1/nnf_storage_types.go @@ -47,16 +47,16 @@ type NnfStorageLustreSpec struct { FileSystemName string `json:"fileSystemName,omitempty"` // TargetType is the type of Lustre target to be created. - // +kubebuilder:validation:Enum=MGT;MDT;MGTMDT;OST + // +kubebuilder:validation:Enum=mgt;mdt;mgtmdt;ost TargetType string `json:"targetType,omitempty"` // BackFs is the type of backing filesystem to use. // +kubebuilder:validation:Enum=ldiskfs;zfs BackFs string `json:"backFs,omitempty"` - // ExternalMgsNid is the NID of the MGS when a pre-existing MGS is - // provided by the DataWarp directive (#DW). - ExternalMgsNid string `json:"externalMgsNid,omitempty"` + // MgsAddress is the NID of the MGS when a pre-existing MGS is + // provided in the NnfStorageProfile + MgsAddress string `json:"mgsAddress,omitempty"` // PersistentMgsReference is a reference to a persistent storage that is providing // the external MGS. 
@@ -105,11 +105,7 @@ type NnfStorageSpec struct { // NnfStorageAllocationSetStatus contains the status information for an allocation set type NnfStorageAllocationSetStatus struct { - // Status reflects the status of this allocation set - Status NnfResourceStatusType `json:"status,omitempty"` - - // Health reflects the health of this allocation set - Health NnfResourceHealthType `json:"health,omitempty"` + Ready bool `json:"ready,omitempty"` // AllocationCount is the total number of allocations that currently // exist @@ -118,10 +114,8 @@ type NnfStorageAllocationSetStatus struct { // NnfStorageStatus defines the observed status of NNF Storage. type NnfStorageStatus struct { - // Important: Run "make" to regenerate code after modifying this file - - // MgsNode is the NID of the MGS. - MgsNode string `json:"mgsNode,omitempty"` + // MgsAddress is the NID of the MGS. + MgsAddress string `json:"mgsAddress,omitempty"` // AllocationsSets holds the status information for each of the AllocationSets // from the spec. @@ -129,10 +123,8 @@ type NnfStorageStatus struct { dwsv1alpha2.ResourceError `json:",inline"` - // Status reflects the status of this NNF Storage - Status NnfResourceStatusType `json:"status,omitempty"` - - // TODO: Conditions + // Ready reflects the status of this NNF Storage + Ready bool `json:"ready,omitempty"` } //+kubebuilder:object:root=true diff --git a/api/v1alpha1/nnfstorageprofile_types.go b/api/v1alpha1/nnfstorageprofile_types.go index ee746527c..14959dc46 100644 --- a/api/v1alpha1/nnfstorageprofile_types.go +++ b/api/v1alpha1/nnfstorageprofile_types.go @@ -125,6 +125,9 @@ type NnfStorageProfileCmdLines struct { // PvCreate specifies the pvcreate commandline, minus the "pvcreate". PvCreate string `json:"pvCreate,omitempty"` + // PvRemove specifies the pvremove commandline, minus the "pvremove". + PvRemove string `json:"pvRemove,omitempty"` + // VgCreate specifies the vgcreate commandline, minus the "vgcreate". 
VgCreate string `json:"vgCreate,omitempty"` @@ -137,49 +140,47 @@ type NnfStorageProfileCmdLines struct { // LvCreate specifies the lvcreate commandline, minus the "lvcreate". LvCreate string `json:"lvCreate,omitempty"` + // LvChange specifies the various lvchange commandlines, minus the "lvchange" + LvChange NnfStorageProfileLVMLvChangeCmdLines `json:"lvChange,omitempty"` + // LvRemove specifies the lvcreate commandline, minus the "lvremove". LvRemove string `json:"lvRemove,omitempty"` + + // MountRabbit specifies mount options for mounting on the Rabbit. + MountRabbit string `json:"mountRabbit,omitempty"` + + // MountCompute specifies mount options for mounting on the Compute. + MountCompute string `json:"mountCompute,omitempty"` } // NnfStorageProfileLVMVgChangeCmdLines type NnfStorageProfileLVMVgChangeCmdLines struct { - // The vgchange commandline for activation, minus the "vgchange" command - Activate string `json:"activate,omitempty"` - - // The vgchange commandline for deactivation, minus the "vgchange" command - Deactivate string `json:"deactivate,omitempty"` - // The vgchange commandline for lockStart, minus the "vgchange" command LockStart string `json:"lockStart,omitempty"` + + // The vgchange commandline for lockStop, minus the "vgchange" command + LockStop string `json:"lockStop,omitempty"` } -// NnfStorageProfileMiscOptions defines options to use for the mount library, and other utilities. -type NnfStorageProfileMiscOptions struct { - // MountRabbit specifies mount options for mounting on the Rabbit. - // Use one array element per option. Do not prepend the options with "-o". - MountRabbit []string `json:"mountRabbit,omitempty"` +// NnfStorageProfileLVMLvChangeCmdLines +type NnfStorageProfileLVMLvChangeCmdLines struct { + // The lvchange commandline for activate, minus the "lvchange" command + Activate string `json:"activate,omitempty"` - // MountCompute specifies mount options for mounting on the Compute. - // Use one array element per option. 
Do not prepend the options with "-o". - MountCompute []string `json:"mountCompute,omitempty"` + // The lvchange commandline for deactivate, minus the "lvchange" command + Deactivate string `json:"deactivate,omitempty"` } // NnfStorageProfileGFS2Data defines the GFS2-specific configuration type NnfStorageProfileGFS2Data struct { // CmdLines contains commands to create volumes and filesystems. CmdLines NnfStorageProfileCmdLines `json:"commandlines,omitempty"` - - // Options contains options for libraries. - Options NnfStorageProfileMiscOptions `json:"options,omitempty"` } // NnfStorageProfileXFSData defines the XFS-specific configuration type NnfStorageProfileXFSData struct { // CmdLines contains commands to create volumes and filesystems. CmdLines NnfStorageProfileCmdLines `json:"commandlines,omitempty"` - - // Options contains options for libraries. - Options NnfStorageProfileMiscOptions `json:"options,omitempty"` } // NnfStorageProfileRawData defines the Raw-specific configuration diff --git a/api/v1alpha1/nnfstorageprofile_webhook.go b/api/v1alpha1/nnfstorageprofile_webhook.go index c201f09dd..ad906c508 100644 --- a/api/v1alpha1/nnfstorageprofile_webhook.go +++ b/api/v1alpha1/nnfstorageprofile_webhook.go @@ -23,6 +23,7 @@ import ( "fmt" "os" "reflect" + "strings" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" @@ -143,3 +144,34 @@ func (r *NnfStorageProfile) validateLustreTargetMiscOptions(targetMiscOptions Nn return nil } + +type VarHandler struct { + VarMap map[string]string +} + +func NewVarHandler(vars map[string]string) *VarHandler { + v := &VarHandler{} + v.VarMap = vars + return v +} + +// ListToVars splits the value of one of its variables, and creates a new +// indexed variable for each of the items in the split. 
+func (v *VarHandler) ListToVars(listVarName, newVarPrefix string) error { + theList, ok := v.VarMap[listVarName] + if !ok { + return fmt.Errorf("Unable to find the variable named %s", listVarName) + } + + for i, val := range strings.Split(theList, " ") { + v.VarMap[fmt.Sprintf("%s%d", newVarPrefix, i+1)] = val + } + return nil +} + +func (v *VarHandler) ReplaceAll(s string) string { + for key, value := range v.VarMap { + s = strings.ReplaceAll(s, key, value) + } + return s +} diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 3be517411..006ca82b2 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -29,26 +29,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClientEndpointsSpec) DeepCopyInto(out *ClientEndpointsSpec) { - *out = *in - if in.NodeNames != nil { - in, out := &in.NodeNames, &out.NodeNames - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientEndpointsSpec. -func (in *ClientEndpointsSpec) DeepCopy() *ClientEndpointsSpec { - if in == nil { - return nil - } - out := new(ClientEndpointsSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LustreStorageSpec) DeepCopyInto(out *LustreStorageSpec) { *out = *in @@ -64,21 +44,6 @@ func (in *LustreStorageSpec) DeepCopy() *LustreStorageSpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LustreStorageStatus) DeepCopyInto(out *LustreStorageStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LustreStorageStatus. 
-func (in *LustreStorageStatus) DeepCopy() *LustreStorageStatus { - if in == nil { - return nil - } - out := new(LustreStorageStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfAccess) DeepCopyInto(out *NnfAccess) { *out = *in @@ -609,6 +574,192 @@ func (in *NnfNode) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeBlockStorage) DeepCopyInto(out *NnfNodeBlockStorage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorage. +func (in *NnfNodeBlockStorage) DeepCopy() *NnfNodeBlockStorage { + if in == nil { + return nil + } + out := new(NnfNodeBlockStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfNodeBlockStorage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeBlockStorageAccessStatus) DeepCopyInto(out *NnfNodeBlockStorageAccessStatus) { + *out = *in + if in.DevicePaths != nil { + in, out := &in.DevicePaths, &out.DevicePaths + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageAccessStatus. 
+func (in *NnfNodeBlockStorageAccessStatus) DeepCopy() *NnfNodeBlockStorageAccessStatus { + if in == nil { + return nil + } + out := new(NnfNodeBlockStorageAccessStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeBlockStorageAllocationSpec) DeepCopyInto(out *NnfNodeBlockStorageAllocationSpec) { + *out = *in + if in.Access != nil { + in, out := &in.Access, &out.Access + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageAllocationSpec. +func (in *NnfNodeBlockStorageAllocationSpec) DeepCopy() *NnfNodeBlockStorageAllocationSpec { + if in == nil { + return nil + } + out := new(NnfNodeBlockStorageAllocationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeBlockStorageAllocationStatus) DeepCopyInto(out *NnfNodeBlockStorageAllocationStatus) { + *out = *in + if in.Accesses != nil { + in, out := &in.Accesses, &out.Accesses + *out = make(map[string]NnfNodeBlockStorageAccessStatus, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.Devices != nil { + in, out := &in.Devices, &out.Devices + *out = make([]NnfNodeBlockStorageDeviceStatus, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageAllocationStatus. +func (in *NnfNodeBlockStorageAllocationStatus) DeepCopy() *NnfNodeBlockStorageAllocationStatus { + if in == nil { + return nil + } + out := new(NnfNodeBlockStorageAllocationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NnfNodeBlockStorageDeviceStatus) DeepCopyInto(out *NnfNodeBlockStorageDeviceStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageDeviceStatus. +func (in *NnfNodeBlockStorageDeviceStatus) DeepCopy() *NnfNodeBlockStorageDeviceStatus { + if in == nil { + return nil + } + out := new(NnfNodeBlockStorageDeviceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeBlockStorageList) DeepCopyInto(out *NnfNodeBlockStorageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NnfNodeBlockStorage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageList. +func (in *NnfNodeBlockStorageList) DeepCopy() *NnfNodeBlockStorageList { + if in == nil { + return nil + } + out := new(NnfNodeBlockStorageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NnfNodeBlockStorageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeBlockStorageSpec) DeepCopyInto(out *NnfNodeBlockStorageSpec) { + *out = *in + if in.Allocations != nil { + in, out := &in.Allocations, &out.Allocations + *out = make([]NnfNodeBlockStorageAllocationSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageSpec. 
+func (in *NnfNodeBlockStorageSpec) DeepCopy() *NnfNodeBlockStorageSpec { + if in == nil { + return nil + } + out := new(NnfNodeBlockStorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NnfNodeBlockStorageStatus) DeepCopyInto(out *NnfNodeBlockStorageStatus) { + *out = *in + if in.Allocations != nil { + in, out := &in.Allocations, &out.Allocations + *out = make([]NnfNodeBlockStorageAllocationStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.ResourceError.DeepCopyInto(&out.ResourceError) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeBlockStorageStatus. +func (in *NnfNodeBlockStorageStatus) DeepCopy() *NnfNodeBlockStorageStatus { + if in == nil { + return nil + } + out := new(NnfNodeBlockStorageStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfNodeECData) DeepCopyInto(out *NnfNodeECData) { *out = *in @@ -814,7 +965,7 @@ func (in *NnfNodeStorage) DeepCopyInto(out *NnfNodeStorage) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) + out.Spec = in.Spec in.Status.DeepCopyInto(&out.Status) } @@ -839,23 +990,6 @@ func (in *NnfNodeStorage) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NnfNodeStorageAllocationStatus) DeepCopyInto(out *NnfNodeStorageAllocationStatus) { *out = *in - if in.CreationTime != nil { - in, out := &in.CreationTime, &out.CreationTime - *out = (*in).DeepCopy() - } - if in.DeletionTime != nil { - in, out := &in.DeletionTime, &out.DeletionTime - *out = (*in).DeepCopy() - } - out.StorageGroup = in.StorageGroup - if in.NVMeList != nil { - in, out := &in.NVMeList, &out.NVMeList - *out = make([]NnfNodeStorageNVMeStatus, len(*in)) - copy(*out, *in) - } - out.FileShare = in.FileShare - out.StoragePool = in.StoragePool - out.FileSystem = in.FileSystem } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeStorageAllocationStatus. @@ -900,32 +1034,11 @@ func (in *NnfNodeStorageList) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfNodeStorageNVMeStatus) DeepCopyInto(out *NnfNodeStorageNVMeStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeStorageNVMeStatus. -func (in *NnfNodeStorageNVMeStatus) DeepCopy() *NnfNodeStorageNVMeStatus { - if in == nil { - return nil - } - out := new(NnfNodeStorageNVMeStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfNodeStorageSpec) DeepCopyInto(out *NnfNodeStorageSpec) { *out = *in out.LustreStorage = in.LustreStorage - if in.ClientEndpoints != nil { - in, out := &in.ClientEndpoints, &out.ClientEndpoints - *out = make([]ClientEndpointsSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } + out.BlockReference = in.BlockReference } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeStorageSpec. 
@@ -944,12 +1057,9 @@ func (in *NnfNodeStorageStatus) DeepCopyInto(out *NnfNodeStorageStatus) { if in.Allocations != nil { in, out := &in.Allocations, &out.Allocations *out = make([]NnfNodeStorageAllocationStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + copy(*out, *in) } in.ResourceError.DeepCopyInto(&out.ResourceError) - out.LustreStorage = in.LustreStorage } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfNodeStorageStatus. @@ -1296,6 +1406,7 @@ func (in *NnfStorageProfile) DeepCopyObject() runtime.Object { func (in *NnfStorageProfileCmdLines) DeepCopyInto(out *NnfStorageProfileCmdLines) { *out = *in out.VgChange = in.VgChange + out.LvChange = in.LvChange } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileCmdLines. @@ -1312,8 +1423,8 @@ func (in *NnfStorageProfileCmdLines) DeepCopy() *NnfStorageProfileCmdLines { func (in *NnfStorageProfileData) DeepCopyInto(out *NnfStorageProfileData) { *out = *in in.LustreStorage.DeepCopyInto(&out.LustreStorage) - in.GFS2Storage.DeepCopyInto(&out.GFS2Storage) - in.XFSStorage.DeepCopyInto(&out.XFSStorage) + out.GFS2Storage = in.GFS2Storage + out.XFSStorage = in.XFSStorage out.RawStorage = in.RawStorage } @@ -1331,7 +1442,6 @@ func (in *NnfStorageProfileData) DeepCopy() *NnfStorageProfileData { func (in *NnfStorageProfileGFS2Data) DeepCopyInto(out *NnfStorageProfileGFS2Data) { *out = *in out.CmdLines = in.CmdLines - in.Options.DeepCopyInto(&out.Options) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileGFS2Data. @@ -1344,6 +1454,21 @@ func (in *NnfStorageProfileGFS2Data) DeepCopy() *NnfStorageProfileGFS2Data { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NnfStorageProfileLVMLvChangeCmdLines) DeepCopyInto(out *NnfStorageProfileLVMLvChangeCmdLines) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileLVMLvChangeCmdLines. +func (in *NnfStorageProfileLVMLvChangeCmdLines) DeepCopy() *NnfStorageProfileLVMLvChangeCmdLines { + if in == nil { + return nil + } + out := new(NnfStorageProfileLVMLvChangeCmdLines) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NnfStorageProfileLVMVgChangeCmdLines) DeepCopyInto(out *NnfStorageProfileLVMVgChangeCmdLines) { *out = *in @@ -1449,31 +1574,6 @@ func (in *NnfStorageProfileLustreMiscOptions) DeepCopy() *NnfStorageProfileLustr return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NnfStorageProfileMiscOptions) DeepCopyInto(out *NnfStorageProfileMiscOptions) { - *out = *in - if in.MountRabbit != nil { - in, out := &in.MountRabbit, &out.MountRabbit - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.MountCompute != nil { - in, out := &in.MountCompute, &out.MountCompute - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileMiscOptions. -func (in *NnfStorageProfileMiscOptions) DeepCopy() *NnfStorageProfileMiscOptions { - if in == nil { - return nil - } - out := new(NnfStorageProfileMiscOptions) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NnfStorageProfileRawData) DeepCopyInto(out *NnfStorageProfileRawData) { *out = *in @@ -1494,7 +1594,6 @@ func (in *NnfStorageProfileRawData) DeepCopy() *NnfStorageProfileRawData { func (in *NnfStorageProfileXFSData) DeepCopyInto(out *NnfStorageProfileXFSData) { *out = *in out.CmdLines = in.CmdLines - in.Options.DeepCopyInto(&out.Options) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NnfStorageProfileXFSData. @@ -1549,3 +1648,25 @@ func (in *NnfStorageStatus) DeepCopy() *NnfStorageStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VarHandler) DeepCopyInto(out *VarHandler) { + *out = *in + if in.VarMap != nil { + in, out := &in.VarMap, &out.VarMap + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VarHandler. 
+func (in *VarHandler) DeepCopy() *VarHandler { + if in == nil { + return nil + } + out := new(VarHandler) + in.DeepCopyInto(out) + return out +} diff --git a/clientmount.spec b/clientmount.spec new file mode 100644 index 000000000..1a89b2b30 --- /dev/null +++ b/clientmount.spec @@ -0,0 +1,32 @@ +%undefine _missing_build_ids_terminate_build +%global debug_package %{nil} + +Name: nnf-clientmount +Version: 1.0 +Release: 1%{?dist} +Summary: Client mount daemon for near node flash + +Group: 1 +License: Apache-2.0 +URL: https://github.com/NearNodeFlash/nnf-sos +Source0: %{name}-%{version}.tar.gz + +BuildRequires: golang +BuildRequires: make + +%description +This package provides clientmountd for performing mount operations for the +near node flash software + +%prep +%setup -q + +%build +RPM_VERSION=$(cat .rpmversion) make build-daemon + +%install +mkdir -p %{buildroot}/usr/bin/ +install -m 755 bin/clientmountd %{buildroot}/usr/bin/clientmountd + +%files +/usr/bin/clientmountd diff --git a/cmd/main.go b/cmd/main.go index 14da81959..015ec3af1 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -206,6 +206,14 @@ func (c *nodeLocalController) SetupReconcilers(mgr manager.Manager, opts *nnf.Op return err } + if err := (&controllers.NnfNodeBlockStorageReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("NnfNodeBlockStorage"), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + return err + } + return (&controllers.NnfNodeStorageReconciler{ Client: mgr.GetClient(), Log: ctrl.Log.WithName("controllers").WithName("NnfNodeStorage"), diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfnodeblockstorages.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfnodeblockstorages.yaml new file mode 100644 index 000000000..2f51030cc --- /dev/null +++ b/config/crd/bases/nnf.cray.hpe.com_nnfnodeblockstorages.yaml @@ -0,0 +1,159 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + 
controller-gen.kubebuilder.io/version: v0.13.0 + name: nnfnodeblockstorages.nnf.cray.hpe.com +spec: + group: nnf.cray.hpe.com + names: + kind: NnfNodeBlockStorage + listKind: NnfNodeBlockStorageList + plural: nnfnodeblockstorages + singular: nnfnodeblockstorage + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.ready + name: READY + type: string + - jsonPath: .status.error.severity + name: ERROR + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NnfNodeBlockStorageSpec defines the desired storage attributes + on a NNF Node. Storage spec are created on bequest of the user and fullfilled + by the NNF Node Controller. 
+ properties: + allocations: + description: Allocations is the list of storage allocation to make + items: + properties: + access: + description: List of nodes where /dev devices should be created + items: + type: string + type: array + capacity: + description: Aggregate capacity of the block devices for each + allocation + format: int64 + type: integer + type: object + type: array + type: object + status: + properties: + allocations: + description: Allocations is the list of storage allocations that were + made + items: + properties: + accesses: + additionalProperties: + properties: + devicePaths: + description: /dev paths for each of the block devices + items: + type: string + type: array + storageGroupId: + description: Redfish ID for the storage group + type: string + type: object + type: object + capacityAllocated: + description: Total capacity allocated for the storage. This + may differ from the requested storage capacity as the system + may round up to the requested capacity satisify underlying + storage requirements (i.e. block size / stripe size). + format: int64 + type: integer + devices: + description: List of NVMe namespaces used by this allocation + items: + properties: + NQN: + description: NQN of the base NVMe device + type: string + capacityAllocated: + description: Total capacity allocated for the storage. + This may differ from the requested storage capacity + as the system may round up to the requested capacity + satisify underlying storage requirements (i.e. block + size / stripe size). 
+ format: int64 + type: integer + namespaceId: + description: Id of the Namespace on the NVMe device (e.g., + "2") + type: string + required: + - NQN + - namespaceId + type: object + type: array + storagePoolId: + description: Redfish ID for the storage pool + type: string + type: object + type: array + error: + description: Error information + properties: + debugMessage: + description: Internal debug message for the error + type: string + severity: + description: Indication of how severe the error is. Minor will + likely succeed, Major may succeed, and Fatal will never succeed. + enum: + - Minor + - Major + - Fatal + type: string + type: + description: Internal or user error + enum: + - Internal + - User + type: string + userMessage: + description: Optional user facing message if the error is relevant + to an end user + type: string + required: + - debugMessage + - severity + - type + type: object + ready: + type: boolean + required: + - ready + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfnodes.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfnodes.yaml index 3508c6782..b27d4d4d7 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfnodes.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfnodes.yaml @@ -132,6 +132,9 @@ spec: health: description: NnfResourceHealthType defines the health of an NNF resource. 
type: string + lnetNid: + description: LNetNid is the LNet address for the NNF node + type: string servers: items: description: NnfServerStatus defines the observed status of servers diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfnodestorages.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfnodestorages.yaml index b4083b3fb..77d9f5774 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfnodestorages.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfnodestorages.yaml @@ -14,7 +14,17 @@ spec: singular: nnfnodestorage scope: Namespaced versions: - - name: v1alpha1 + - additionalPrinterColumns: + - jsonPath: .status.ready + name: READY + type: string + - jsonPath: .status.error.severity + name: ERROR + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 schema: openAPIV3Schema: description: NnfNodeStorage is the Schema for the NnfNodeStorage API @@ -36,32 +46,43 @@ spec: on a NNF Node. Storage spec are created on bequest of the user and fullfilled by the NNF Node Controller. properties: - capacity: - description: Capacity defines the capacity, in bytes, of this storage - specification. The NNF Node itself may split the storage among the - available drives operating in the NNF Node. - format: int64 - type: integer - clientEndpoints: - description: ClientEndpoints sets which endpoints should have access - to an allocation. - items: - description: ClientEndpointsSpec contains information about which - nodes a storage allocation should be visible to - properties: - allocationIndex: - description: Index of the allocation in the NnfNodeStorage - type: integer - nodeNames: - description: List of nodes that should see the allocation - items: - type: string - type: array - required: - - allocationIndex - - nodeNames - type: object - type: array + blockReference: + description: BlockReference is an object reference to an NnfNodeBlockStorage + properties: + apiVersion: + description: API version of the referent. 
+ type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic count: description: Count is the number of allocations to make on this node. All of the allocations will be created with the same parameters @@ -98,9 +119,9 @@ spec: filesystem. maxLength: 8 type: string - mgsNode: - description: MgsNode is the NID of the MGS to use. This is used - only when creating MDT and OST targets. 
+ mgsAddress: + description: MgsAddress is the NID of the MGS to use. This is + used only when creating MDT and OST targets. type: string startIndex: description: StartIndex is used to order a series of MDTs or OSTs. This @@ -113,28 +134,20 @@ spec: targetType: description: TargetType is the type of Lustre target to be created. enum: - - MGT - - MDT - - MGTMDT - - OST + - mgt + - mdt + - mgtmdt + - ost type: string type: object - setOwnerGroup: - default: false - description: Set the owner and group permissions specified by UserID - and GroupID. This is for Lustre file systems only, and should be - set only after all Lustre targets are created. - type: boolean userID: description: User ID for file system format: int32 type: integer required: - - clientEndpoints - count - fileSystemType - groupID - - setOwnerGroup - userID type: object status: @@ -147,139 +160,11 @@ spec: description: NnfNodeStorageAllocationStatus defines the allocation status for each allocation in the NnfNodeStorage properties: - capacityAllocated: - description: Total capacity allocated for the storage. This - may differ from the requested storage capacity as the system - may round up to the requested capacity satisify underlying - storage requirements (i.e. block size / stripe size). - format: int64 - type: integer - creationTime: - description: Represents the time when the storage was created - by the controller It is represented in RFC3339 form and is - in UTC. - format: date-time - type: string - deletionTime: - description: Represents the time when the storage was deleted - by the controller. This field is updated when the Storage - specification State transitions to 'Delete' by the client. - It is represented in RFC3339 form and is in UTC. - format: date-time - type: string - fileShare: - description: Represents the file share that is supporting this - server. 
A file share is the combination of a storage group - and the associated file system parameters (type, mountpoint) - that makes up the available storage. - properties: - health: - description: NnfResourceHealthType defines the health of - an NNF resource. - type: string - id: - description: ID reflects the NNF Node unique identifier - for this NNF Server resource. - type: string - name: - description: Name reflects the common name of this NNF Server - resource. - type: string - status: - description: NnfResourceStatusType is the string that indicates - the resource's status - type: string - type: object - fileSystem: - description: NnfResourceStatus provides common fields that are - included in all NNF Resources - properties: - health: - description: NnfResourceHealthType defines the health of - an NNF resource. - type: string - id: - description: ID reflects the NNF Node unique identifier - for this NNF Server resource. - type: string - name: - description: Name reflects the common name of this NNF Server - resource. - type: string - status: - description: NnfResourceStatusType is the string that indicates - the resource's status - type: string - type: object logicalVolume: description: Name of the LVM LV type: string - nvmeList: - description: List of NVMe namespaces used by this allocation - items: - description: NnfNodeStorageNVMeStatus provides a way to uniquely - identify an NVMe namespace in the system - properties: - deviceSerial: - description: Serial number of the base NVMe device - type: string - namespaceGUID: - description: Globally unique namespace ID - type: string - namespaceID: - description: Id of the Namespace on the NVMe device (e.g., - "2") - type: string - required: - - deviceSerial - - namespaceGUID - - namespaceID - type: object - type: array - storageGroup: - description: Represents the storage group that is supporting - this server. A storage group is the mapping from a group of - drive namespaces to an individual server. 
This value can be - safely ignored by the client. - properties: - health: - description: NnfResourceHealthType defines the health of - an NNF resource. - type: string - id: - description: ID reflects the NNF Node unique identifier - for this NNF Server resource. - type: string - name: - description: Name reflects the common name of this NNF Server - resource. - type: string - status: - description: NnfResourceStatusType is the string that indicates - the resource's status - type: string - type: object - storagePool: - description: NnfResourceStatus provides common fields that are - included in all NNF Resources - properties: - health: - description: NnfResourceHealthType defines the health of - an NNF resource. - type: string - id: - description: ID reflects the NNF Node unique identifier - for this NNF Server resource. - type: string - name: - description: Name reflects the common name of this NNF Server - resource. - type: string - status: - description: NnfResourceStatusType is the string that indicates - the resource's status - type: string - type: object + ready: + type: boolean volumeGroup: description: Name of the LVM VG type: string @@ -314,20 +199,11 @@ spec: - severity - type type: object - lustreStorage: - description: LustreStorageStatus describes the Lustre targets created - here. - properties: - nid: - description: Nid (LNet Network Identifier) of this node. This - is populated on MGS nodes only. 
- type: string - type: object - ownerGroupStatus: - description: OwnerGroupStatus is the status of the operation for setting - the owner and group of a file system - type: string + ready: + type: boolean type: object type: object served: true storage: true + subresources: + status: {} diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfstorageprofiles.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfstorageprofiles.yaml index 9a239a9e6..9c62f6477 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfstorageprofiles.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfstorageprofiles.yaml @@ -47,6 +47,19 @@ spec: description: CmdLines contains commands to create volumes and filesystems. properties: + lvChange: + description: LvChange specifies the various lvchange commandlines, + minus the "lvchange" + properties: + activate: + description: The lvchange commandline for activate, minus + the "lvchange" command + type: string + deactivate: + description: The lvchange commandline for deactivate, + minus the "lvchange" command + type: string + type: object lvCreate: description: LvCreate specifies the lvcreate commandline, minus the "lvcreate". @@ -59,26 +72,34 @@ spec: description: Mkfs specifies the mkfs commandline, minus the "mkfs". type: string + mountCompute: + description: MountCompute specifies mount options for mounting + on the Compute. + type: string + mountRabbit: + description: MountRabbit specifies mount options for mounting + on the Rabbit. + type: string pvCreate: description: PvCreate specifies the pvcreate commandline, minus the "pvcreate". type: string + pvRemove: + description: PvRemove specifies the pvremove commandline, + minus the "pvremove". 
+ type: string vgChange: description: VgChange specifies the various vgchange commandlines, minus the "vgchange" properties: - activate: - description: The vgchange commandline for activation, - minus the "vgchange" command - type: string - deactivate: - description: The vgchange commandline for deactivation, - minus the "vgchange" command - type: string lockStart: description: The vgchange commandline for lockStart, minus the "vgchange" command type: string + lockStop: + description: The vgchange commandline for lockStop, minus + the "vgchange" command + type: string type: object vgCreate: description: VgCreate specifies the vgcreate commandline, @@ -89,24 +110,6 @@ spec: minus the "vgremove". type: string type: object - options: - description: Options contains options for libraries. - properties: - mountCompute: - description: MountCompute specifies mount options for mounting - on the Compute. Use one array element per option. Do not - prepend the options with "-o". - items: - type: string - type: array - mountRabbit: - description: MountRabbit specifies mount options for mounting - on the Rabbit. Use one array element per option. Do not - prepend the options with "-o". - items: - type: string - type: array - type: object type: object lustreStorage: description: LustreStorage defines the Lustre-specific configuration @@ -360,6 +363,19 @@ spec: description: CmdLines contains commands to create volumes and filesystems. properties: + lvChange: + description: LvChange specifies the various lvchange commandlines, + minus the "lvchange" + properties: + activate: + description: The lvchange commandline for activate, minus + the "lvchange" command + type: string + deactivate: + description: The lvchange commandline for deactivate, + minus the "lvchange" command + type: string + type: object lvCreate: description: LvCreate specifies the lvcreate commandline, minus the "lvcreate". 
@@ -372,26 +388,34 @@ spec: description: Mkfs specifies the mkfs commandline, minus the "mkfs". type: string + mountCompute: + description: MountCompute specifies mount options for mounting + on the Compute. + type: string + mountRabbit: + description: MountRabbit specifies mount options for mounting + on the Rabbit. + type: string pvCreate: description: PvCreate specifies the pvcreate commandline, minus the "pvcreate". type: string + pvRemove: + description: PvRemove specifies the pvremove commandline, + minus the "pvremove". + type: string vgChange: description: VgChange specifies the various vgchange commandlines, minus the "vgchange" properties: - activate: - description: The vgchange commandline for activation, - minus the "vgchange" command - type: string - deactivate: - description: The vgchange commandline for deactivation, - minus the "vgchange" command - type: string lockStart: description: The vgchange commandline for lockStart, minus the "vgchange" command type: string + lockStop: + description: The vgchange commandline for lockStop, minus + the "vgchange" command + type: string type: object vgCreate: description: VgCreate specifies the vgcreate commandline, @@ -410,6 +434,19 @@ spec: description: CmdLines contains commands to create volumes and filesystems. properties: + lvChange: + description: LvChange specifies the various lvchange commandlines, + minus the "lvchange" + properties: + activate: + description: The lvchange commandline for activate, minus + the "lvchange" command + type: string + deactivate: + description: The lvchange commandline for deactivate, + minus the "lvchange" command + type: string + type: object lvCreate: description: LvCreate specifies the lvcreate commandline, minus the "lvcreate". @@ -422,26 +459,34 @@ spec: description: Mkfs specifies the mkfs commandline, minus the "mkfs". type: string + mountCompute: + description: MountCompute specifies mount options for mounting + on the Compute. 
+ type: string + mountRabbit: + description: MountRabbit specifies mount options for mounting + on the Rabbit. + type: string pvCreate: description: PvCreate specifies the pvcreate commandline, minus the "pvcreate". type: string + pvRemove: + description: PvRemove specifies the pvremove commandline, + minus the "pvremove". + type: string vgChange: description: VgChange specifies the various vgchange commandlines, minus the "vgchange" properties: - activate: - description: The vgchange commandline for activation, - minus the "vgchange" command - type: string - deactivate: - description: The vgchange commandline for deactivation, - minus the "vgchange" command - type: string lockStart: description: The vgchange commandline for lockStart, minus the "vgchange" command type: string + lockStop: + description: The vgchange commandline for lockStop, minus + the "vgchange" command + type: string type: object vgCreate: description: VgCreate specifies the vgcreate commandline, @@ -452,24 +497,6 @@ spec: minus the "vgremove". type: string type: object - options: - description: Options contains options for libraries. - properties: - mountCompute: - description: MountCompute specifies mount options for mounting - on the Compute. Use one array element per option. Do not - prepend the options with "-o". - items: - type: string - type: array - mountRabbit: - description: MountRabbit specifies mount options for mounting - on the Rabbit. Use one array element per option. Do not - prepend the options with "-o". - items: - type: string - type: array - type: object type: object required: - gfs2Storage diff --git a/config/crd/bases/nnf.cray.hpe.com_nnfstorages.yaml b/config/crd/bases/nnf.cray.hpe.com_nnfstorages.yaml index 6fd79aefc..d506756f8 100644 --- a/config/crd/bases/nnf.cray.hpe.com_nnfstorages.yaml +++ b/config/crd/bases/nnf.cray.hpe.com_nnfstorages.yaml @@ -66,15 +66,15 @@ spec: among the available drives operating in the NNF Node. 
format: int64 type: integer - externalMgsNid: - description: ExternalMgsNid is the NID of the MGS when a pre-existing - MGS is provided by the DataWarp directive (#DW). - type: string fileSystemName: description: FileSystemName is the fsname parameter for the Lustre filesystem. maxLength: 8 type: string + mgsAddress: + description: MgsAddress is the NID of the MGS when a pre-existing + MGS is provided in the NnfStorageProfile + type: string name: description: Name is a human readable label for this set of allocations (e.g., xfs) @@ -139,10 +139,10 @@ spec: targetType: description: TargetType is the type of Lustre target to be created. enum: - - MGT - - MDT - - MGTMDT - - OST + - mgt + - mdt + - mgtmdt + - ost type: string required: - capacity @@ -190,12 +190,8 @@ spec: description: AllocationCount is the total number of allocations that currently exist type: integer - health: - description: Health reflects the health of this allocation set - type: string - status: - description: Status reflects the status of this allocation set - type: string + ready: + type: boolean required: - allocationCount type: object @@ -229,12 +225,12 @@ spec: - severity - type type: object - mgsNode: - description: MgsNode is the NID of the MGS. - type: string - status: - description: Status reflects the status of this NNF Storage + mgsAddress: + description: MgsAddress is the NID of the MGS. 
type: string + ready: + description: Ready reflects the status of this NNF Storage + type: boolean type: object type: object served: true diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index c185359d6..e50620c12 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -4,6 +4,7 @@ resources: - bases/nnf.cray.hpe.com_nnfnodes.yaml - bases/nnf.cray.hpe.com_nnfnodestorages.yaml + - bases/nnf.cray.hpe.com_nnfnodeblockstorages.yaml - bases/nnf.cray.hpe.com_nnfstorages.yaml - bases/nnf.cray.hpe.com_nnfdatamovements.yaml - bases/nnf.cray.hpe.com_nnfdatamovementmanagers.yaml diff --git a/config/examples/nnf_v1alpha1_nnfstorageprofile.yaml b/config/examples/nnf_v1alpha1_nnfstorageprofile.yaml index 796240c68..72e19ea98 100644 --- a/config/examples/nnf_v1alpha1_nnfstorageprofile.yaml +++ b/config/examples/nnf_v1alpha1_nnfstorageprofile.yaml @@ -6,19 +6,19 @@ metadata: data: default: true lustreStorage: - combinedMgtMdt: true + combinedMgtMdt: false mgtCommandlines: zpoolCreate: -O canmount=off -o cachefile=none $POOL_NAME $DEVICE_LIST - mkfs: --mgs $VOL_NAME + mkfs: --mgs --backfstype=$BACKFS $ZVOL_NAME mdtCommandlines: zpoolCreate: -O canmount=off -o cachefile=none $POOL_NAME $DEVICE_LIST - mkfs: --mdt --fsname=$FS_NAME --mgsnode=$MGS_NID --index=$INDEX $VOL_NAME + mkfs: --mdt --backfstype=$BACKFS --fsname=$FS_NAME --mgsnode=$MGS_NID --index=$INDEX $ZVOL_NAME mgtMdtCommandlines: zpoolCreate: -O canmount=off -o cachefile=none $POOL_NAME $DEVICE_LIST - mkfs: --mgs --mdt --fsname=$FS_NAME --index=$INDEX $VOL_NAME + mkfs: --mgs --mdt --backfstype=$BACKFS --fsname=$FS_NAME --index=$INDEX $ZVOL_NAME ostCommandlines: zpoolCreate: -O canmount=off -o cachefile=none $POOL_NAME $DEVICE_LIST - mkfs: --ost --fsname=$FS_NAME --mgsnode=$MGS_NID --index=$INDEX $VOL_NAME + mkfs: --ost --backfstype=$BACKFS --fsname=$FS_NAME --mgsnode=$MGS_NID --index=$INDEX $ZVOL_NAME ostOptions: scale: 5 colocateComputes: true @@ -31,35 +31,46 @@ data: 
gfs2Storage: commandlines: pvCreate: $DEVICE - vgCreate: $VG_NAME $DEVICE_LIST + pvRemove: $DEVICE + vgCreate: --shared $VG_NAME $DEVICE_LIST vgChange: - activate: --activate ys $VG_NAME - deactivate: --activate n $VG_NAME lockStart: --lock-start $VG_NAME + lockStop: --lock-stop $VG_NAME vgRemove: $VG_NAME - lvCreate: --activate ys --extents 100%VG --stripes $DEVICE_NUM --stripesize=32KiB --name $LV_NAME $VG_NAME + lvCreate: --zero n --activate n --extents 100%VG --stripes $DEVICE_NUM --stripesize=32KiB --name $LV_NAME $VG_NAME + lvChange: + activate: --activate ys $VG_NAME/$LV_NAME + deactivate: --activate n $VG_NAME/$LV_NAME lvRemove: $VG_NAME mkfs: -j2 -p $PROTOCOL -t $CLUSTER_NAME:$LOCK_SPACE $DEVICE xfsStorage: commandlines: pvCreate: $DEVICE - vgCreate: $VG_NAME $DEVICE_LIST + pvRemove: $DEVICE + vgCreate: --shared $VG_NAME $DEVICE_LIST vgChange: - activate: --activate y $VG_NAME - deactivate: --activate n $VG_NAME + lockStart: --lock-start $VG_NAME + lockStop: --lock-stop $VG_NAME vgRemove: $VG_NAME - lvCreate: --extents 100%VG --stripes $DEVICE_NUM --stripesize=32KiB --name $LV_NAME $VG_NAME + lvCreate: --zero n --activate n --extents 100%VG --stripes $DEVICE_NUM --stripesize=32KiB --name $LV_NAME $VG_NAME + lvChange: + activate: --activate y $VG_NAME/$LV_NAME + deactivate: --activate n $VG_NAME/$LV_NAME lvRemove: $VG_NAME mkfs: $DEVICE rawStorage: commandlines: pvCreate: $DEVICE - vgCreate: $VG_NAME $DEVICE_LIST + pvRemove: $DEVICE + vgCreate: --shared $VG_NAME $DEVICE_LIST vgChange: - activate: --activate y $VG_NAME - deactivate: --activate n $VG_NAME + lockStart: --lock-start $VG_NAME + lockStop: --lock-stop $VG_NAME vgRemove: $VG_NAME - lvCreate: --extents 100%VG --stripes $DEVICE_NUM --stripesize=32KiB --name $LV_NAME $VG_NAME + lvCreate: --zero n --activate n --extents 100%VG --stripes $DEVICE_NUM --stripesize=32KiB --name $LV_NAME $VG_NAME + lvChange: + activate: --activate y $VG_NAME/$LV_NAME + deactivate: --activate n $VG_NAME/$LV_NAME lvRemove: 
$VG_NAME diff --git a/config/prometheus/kustomization.yaml b/config/prometheus/kustomization.yaml index 8c75d9993..d3cc46be3 100644 --- a/config/prometheus/kustomization.yaml +++ b/config/prometheus/kustomization.yaml @@ -1,5 +1,5 @@ # Adds namespace to all resources. namespace: nnf-system -resources: -- monitor.yaml +#resources: +#- monitor.yaml diff --git a/config/rbac/clientmount_role.yaml b/config/rbac/clientmount_role.yaml new file mode 100644 index 000000000..b66d8d7d9 --- /dev/null +++ b/config/rbac/clientmount_role.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: clientmount-role +rules: +- apiGroups: + - dataworkflowservices.github.io + resources: + - clientmounts + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - dataworkflowservices.github.io + resources: + - clientmounts/finalizers + verbs: + - update +- apiGroups: + - dataworkflowservices.github.io + resources: + - clientmounts/status + verbs: + - get + - patch + - update +- apiGroups: + - nnf.cray.hpe.com + resources: + - nnfstorageprofiles + verbs: + - get + - list + - watch diff --git a/config/rbac/clientmount_role_binding.yaml b/config/rbac/clientmount_role_binding.yaml new file mode 100644 index 000000000..e8618214b --- /dev/null +++ b/config/rbac/clientmount_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: clientmount-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: clientmount-role +subjects: +- kind: ServiceAccount + name: clientmount + namespace: system diff --git a/config/rbac/clientmount_service_account.yaml b/config/rbac/clientmount_service_account.yaml new file mode 100644 index 000000000..a745d7b28 --- /dev/null +++ b/config/rbac/clientmount_service_account.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: clientmount + namespace: system +--- +# As of Kubernetes 1.24, 
ServiceAccount tokens are no longer automatically +# generated. Instead, manually create the secret and the token key in the +# data field will be automatically set. +apiVersion: v1 +kind: Secret +metadata: + name: clientmount + namespace: system + annotations: + kubernetes.io/service-account.name: clientmount + kubernetes.io/service-account.namespace: system +type: kubernetes.io/service-account-token diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index 38fd7d6b2..c769b782b 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -7,6 +7,9 @@ resources: - service_account.yaml - role.yaml - role_binding.yaml +- clientmount_service_account.yaml +- clientmount_role.yaml +- clientmount_role_binding.yaml - fencing_agent_service_account.yaml - fencing_agent_role.yaml - fencing_agent_role_binding.yaml diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index c808b16bc..86c6edae0 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -353,6 +353,33 @@ rules: - patch - update - watch +- apiGroups: + - nnf.cray.hpe.com + resources: + - nnfnodeblockstorages + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - nnf.cray.hpe.com + resources: + - nnfnodeblockstorages/finalizers + verbs: + - update +- apiGroups: + - nnf.cray.hpe.com + resources: + - nnfnodeblockstorages/status + verbs: + - get + - patch + - update - apiGroups: - nnf.cray.hpe.com resources: @@ -424,6 +451,14 @@ rules: - nnfnodestorages/finalizers verbs: - update +- apiGroups: + - nnf.cray.hpe.com + resources: + - nnfnodestorages/status + verbs: + - get + - patch + - update - apiGroups: - nnf.cray.hpe.com resources: diff --git a/go.mod b/go.mod index 3a4104d5e..bd916f8f6 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/NearNodeFlash/nnf-sos go 1.19 require ( - github.com/DataWorkflowServices/dws v0.0.1-0.20231031201121-13a5a69a969e + 
github.com/DataWorkflowServices/dws v0.0.1-0.20231204205237-79dec3ba94dd github.com/NearNodeFlash/lustre-fs-operator v0.0.1-0.20231031201943-531116c1194e github.com/NearNodeFlash/nnf-ec v0.0.0-20231010162453-a8168bb6a52f github.com/ghodss/yaml v1.0.0 @@ -14,8 +14,10 @@ require ( github.com/onsi/ginkgo/v2 v2.11.0 github.com/onsi/gomega v1.27.10 github.com/prometheus/client_golang v1.16.0 + github.com/takama/daemon v1.0.0 go.openly.dev/pointy v1.3.0 go.uber.org/zap v1.25.0 + golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e golang.org/x/sync v0.3.0 k8s.io/api v0.28.1 k8s.io/apimachinery v0.28.1 @@ -76,7 +78,6 @@ require ( go.chromium.org/luci v0.0.0-20230227223707-c4460eb434d8 // indirect go.opencensus.io v0.24.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect golang.org/x/net v0.13.0 // indirect golang.org/x/oauth2 v0.8.0 // indirect golang.org/x/sys v0.12.0 // indirect diff --git a/go.sum b/go.sum index ad726442e..6a57fed55 100644 --- a/go.sum +++ b/go.sum @@ -2,6 +2,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataWorkflowServices/dws v0.0.1-0.20231031201121-13a5a69a969e h1:QhDrVNQ6zyJcnP0+I147Ei19QAoOL5sDvkjNTkRbELA= github.com/DataWorkflowServices/dws v0.0.1-0.20231031201121-13a5a69a969e/go.mod h1:grHFCu0CoUK8exzS57r6cdf4qHpG1Pv5nl0n7evpaUM= +github.com/DataWorkflowServices/dws v0.0.1-0.20231204205237-79dec3ba94dd h1:Iw0U3nuaia4VN5MncP2xihNIWm5Reun7KVuJwfNTZKk= +github.com/DataWorkflowServices/dws v0.0.1-0.20231204205237-79dec3ba94dd/go.mod h1:grHFCu0CoUK8exzS57r6cdf4qHpG1Pv5nl0n7evpaUM= github.com/HewlettPackard/structex v1.0.4 h1:RVTdN5FWhDWr1IkjllU8wxuLjISo4gr6u5ryZpzyHcA= github.com/HewlettPackard/structex v1.0.4/go.mod h1:3frC4RY/cPsP/4+N8rkxsNAGlQwHV+zDC7qvrN+N+rE= github.com/NearNodeFlash/lustre-fs-operator v0.0.1-0.20231031201943-531116c1194e 
h1:j+MNZYrAcwtaUxqA2CcJFyPLWhfxpO6fsIUXhXljY2U= @@ -183,6 +185,7 @@ github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdO github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rs/cors v1.8.3 h1:O+qNyWn7Z+F9M0ILBHgMVPuB1xTOucVd5gtaYyXBpRo= github.com/rs/cors v1.8.3/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= @@ -216,6 +219,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/takama/daemon v1.0.0 h1:XS3VLnFKmqw2Z7fQ/dHRarrVjdir9G3z7BEP8osjizQ= +github.com/takama/daemon v1.0.0/go.mod h1:gKlhcjbqtBODg5v9H1nj5dU1a2j2GemtuWSNLD5rxOE= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -283,6 +288,7 @@ golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200722175500-76b94024e4b6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/internal/controller/dws_servers_controller.go b/internal/controller/dws_servers_controller.go index 2405e6308..8ae060968 100644 --- a/internal/controller/dws_servers_controller.go +++ b/internal/controller/dws_servers_controller.go @@ -195,7 +195,7 @@ func (r *DWSServersReconciler) updateCapacityUsed(ctx context.Context, servers * allocationSet := nnfStorage.Status.AllocationSets[storageIndex] - if allocationSet.Status != nnfv1alpha1.ResourceReady { + if allocationSet.Ready == false { ready = false } @@ -231,32 +231,32 @@ func (r *DWSServersReconciler) updateCapacityUsed(ctx context.Context, servers * matchLabels, } - nnfNodeStorageList := &nnfv1alpha1.NnfNodeStorageList{} - if err := r.List(ctx, nnfNodeStorageList, listOptions...); err != nil { + nnfNodeBlockStorageList := &nnfv1alpha1.NnfNodeBlockStorageList{} + if err := r.List(ctx, nnfNodeBlockStorageList, listOptions...); err != nil { return ctrl.Result{}, err } - if len(nnfNodeStorageList.Items) != len(nnfStorage.Spec.AllocationSets[storageIndex].Nodes) { + if len(nnfNodeBlockStorageList.Items) != len(nnfStorage.Spec.AllocationSets[storageIndex].Nodes) { ready = false } capacityAllocatedMap := make(map[string]int64) - for _, nnfNodeStorage := range nnfNodeStorageList.Items { + for _, nnfNodeBlockStorage := range nnfNodeBlockStorageList.Items { // There can be multiple allocations per Rabbit. 
Add them all up and present a // single size for the servers resource var allocationSize int64 - for _, nnfNodeAllocation := range nnfNodeStorage.Status.Allocations { + for _, nnfNodeAllocation := range nnfNodeBlockStorage.Status.Allocations { if nnfNodeAllocation.CapacityAllocated == 0 { ready = false } allocationSize += nnfNodeAllocation.CapacityAllocated } - if _, exists := capacityAllocatedMap[nnfNodeStorage.Namespace]; exists { - capacityAllocatedMap[nnfNodeStorage.Namespace] += allocationSize + if _, exists := capacityAllocatedMap[nnfNodeBlockStorage.Namespace]; exists { + capacityAllocatedMap[nnfNodeBlockStorage.Namespace] += allocationSize } else { - capacityAllocatedMap[nnfNodeStorage.Namespace] = allocationSize + capacityAllocatedMap[nnfNodeBlockStorage.Namespace] = allocationSize } } diff --git a/internal/controller/filesystem_helpers.go b/internal/controller/filesystem_helpers.go new file mode 100644 index 000000000..3c3cd5330 --- /dev/null +++ b/internal/controller/filesystem_helpers.go @@ -0,0 +1,333 @@ +/* + * Copyright 2023 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package controller + +import ( + "context" + "fmt" + "os" + "reflect" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/NearNodeFlash/nnf-sos/pkg/blockdevice" + "github.com/NearNodeFlash/nnf-sos/pkg/blockdevice/lvm" + "github.com/NearNodeFlash/nnf-sos/pkg/filesystem" + "github.com/go-logr/logr" + + dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" +) + +//+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfnodestorages,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfnodestorages/finalizers,verbs=update +//+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfstorageprofiles,verbs=get;create;list;watch;update;patch;delete;deletecollection + +func getBlockDeviceAndFileSystemForKind(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, index int, log logr.Logger) (blockdevice.BlockDevice, filesystem.FileSystem, error) { + + blockDevice, err := newMockBlockDevice(ctx, c, nnfNodeStorage, index, log) + if err != nil { + return nil, nil, dwsv1alpha2.NewResourceError("could not create mock block device").WithError(err).WithMajor() + } + + fileSystem, err := newMockFileSystem(ctx, c, nnfNodeStorage, blockDevice, index, log) + if err != nil { + return nil, nil, dwsv1alpha2.NewResourceError("could not create mock file system").WithError(err).WithMajor() + } + + return blockDevice, fileSystem, nil +} + +// getBlockDeviceAndFileSystem returns blockdevice and filesystem interfaces based on the allocation type and NnfStorageProfile. 
+func getBlockDeviceAndFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, index int, log logr.Logger) (blockdevice.BlockDevice, filesystem.FileSystem, error) { + _, found := os.LookupEnv("NNF_TEST_ENVIRONMENT") + if found || os.Getenv("ENVIRONMENT") == "kind" { + return getBlockDeviceAndFileSystemForKind(ctx, c, nnfNodeStorage, index, log) + } + + nnfStorageProfile, err := getPinnedStorageProfileFromLabel(ctx, c, nnfNodeStorage) + if err != nil { + return nil, nil, dwsv1alpha2.NewResourceError("could not find pinned storage profile").WithError(err).WithFatal() + } + + switch nnfNodeStorage.Spec.FileSystemType { + case "raw": + blockDevice, err := newLvmBlockDevice(ctx, c, nnfNodeStorage, nnfStorageProfile.Data.RawStorage.CmdLines, index, log) + if err != nil { + return nil, nil, dwsv1alpha2.NewResourceError("could not create LVM block device").WithError(err).WithMajor() + } + + fileSystem, err := newBindFileSystem(ctx, c, nnfNodeStorage, blockDevice, index, log) + if err != nil { + return nil, nil, dwsv1alpha2.NewResourceError("could not create XFS file system").WithError(err).WithMajor() + } + + return blockDevice, fileSystem, nil + case "xfs": + blockDevice, err := newLvmBlockDevice(ctx, c, nnfNodeStorage, nnfStorageProfile.Data.XFSStorage.CmdLines, index, log) + if err != nil { + return nil, nil, dwsv1alpha2.NewResourceError("could not create LVM block device").WithError(err).WithMajor() + } + + fileSystem, err := newXfsFileSystem(ctx, c, nnfNodeStorage, nnfStorageProfile.Data.XFSStorage.CmdLines, blockDevice, index, log) + if err != nil { + return nil, nil, dwsv1alpha2.NewResourceError("could not create XFS file system").WithError(err).WithMajor() + } + + return blockDevice, fileSystem, nil + case "gfs2": + blockDevice, err := newLvmBlockDevice(ctx, c, nnfNodeStorage, nnfStorageProfile.Data.GFS2Storage.CmdLines, index, log) + if err != nil { + return nil, nil, dwsv1alpha2.NewResourceError("could not create LVM block 
device").WithError(err).WithMajor() + } + + fileSystem, err := newGfs2FileSystem(ctx, c, nnfNodeStorage, nnfStorageProfile.Data.GFS2Storage.CmdLines, blockDevice, index, log) + if err != nil { + return nil, nil, dwsv1alpha2.NewResourceError("could not create GFS2 file system").WithError(err).WithMajor() + } + + return blockDevice, fileSystem, nil + case "lustre": + commandLines := nnfv1alpha1.NnfStorageProfileLustreCmdLines{} + + switch nnfNodeStorage.Spec.LustreStorage.TargetType { + case "mgt": + commandLines = nnfStorageProfile.Data.LustreStorage.MgtCmdLines + break + case "mgtmdt": + commandLines = nnfStorageProfile.Data.LustreStorage.MgtMdtCmdLines + break + case "mdt": + commandLines = nnfStorageProfile.Data.LustreStorage.MdtCmdLines + break + case "ost": + commandLines = nnfStorageProfile.Data.LustreStorage.OstCmdLines + break + default: + return nil, nil, dwsv1alpha2.NewResourceError("invalid Lustre target type %s", nnfNodeStorage.Spec.LustreStorage.TargetType).WithFatal() + } + + blockDevice, err := newZpoolBlockDevice(ctx, c, nnfNodeStorage, commandLines, index, log) + if err != nil { + return nil, nil, dwsv1alpha2.NewResourceError("could not create zpool block device").WithError(err).WithMajor() + } + + fileSystem, err := newLustreFileSystem(ctx, c, nnfNodeStorage, commandLines, blockDevice, index, log) + if err != nil { + return nil, nil, dwsv1alpha2.NewResourceError("could not create lustre file system").WithError(err).WithMajor() + } + + return blockDevice, fileSystem, nil + default: + break + } + + return nil, nil, dwsv1alpha2.NewResourceError("unsupported file system type %s", nnfNodeStorage.Spec.FileSystemType).WithMajor() +} + +func newZpoolBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, cmdLines nnfv1alpha1.NnfStorageProfileLustreCmdLines, index int, log logr.Logger) (blockdevice.BlockDevice, error) { + zpool := blockdevice.Zpool{} + + if nnfNodeStorage.Spec.BlockReference.Kind != 
reflect.TypeOf(nnfv1alpha1.NnfNodeBlockStorage{}).Name() { + return nil, nil + } + + nnfNodeBlockStorage := &nnfv1alpha1.NnfNodeBlockStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: nnfNodeStorage.GetName(), + Namespace: nnfNodeStorage.GetNamespace(), + }, + } + + if err := c.Get(ctx, client.ObjectKeyFromObject(nnfNodeBlockStorage), nnfNodeBlockStorage); err != nil { + return nil, dwsv1alpha2.NewResourceError("could not get NnfNodeBlockStorage: %v", client.ObjectKeyFromObject(nnfNodeBlockStorage)).WithError(err).WithUserMessage("could not find storage allocation").WithMajor() + } + + zpool.Log = log + zpool.Devices = append([]string{}, nnfNodeBlockStorage.Status.Allocations[index].Accesses[os.Getenv("NNF_NODE_NAME")].DevicePaths...) + zpool.Name = fmt.Sprintf("%s-%s-%d", nnfNodeStorage.Spec.LustreStorage.FileSystemName, nnfNodeStorage.Spec.LustreStorage.TargetType, index) + zpool.DataSet = nnfNodeStorage.Spec.LustreStorage.TargetType + + zpool.CommandArgs.Create = cmdLines.ZpoolCreate + + return &zpool, nil +} + +func newLvmBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, cmdLines nnfv1alpha1.NnfStorageProfileCmdLines, index int, log logr.Logger) (blockdevice.BlockDevice, error) { + lvmDesc := blockdevice.Lvm{} + devices := []string{} + + if nnfNodeStorage.Spec.BlockReference.Kind == reflect.TypeOf(nnfv1alpha1.NnfNodeBlockStorage{}).Name() { + nnfNodeBlockStorage := &nnfv1alpha1.NnfNodeBlockStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: nnfNodeStorage.GetName(), + Namespace: nnfNodeStorage.GetNamespace(), + }, + } + + err := c.Get(ctx, client.ObjectKeyFromObject(nnfNodeBlockStorage), nnfNodeBlockStorage) + if err != nil { + return nil, dwsv1alpha2.NewResourceError("could not get NnfNodeBlockStorage: %v", client.ObjectKeyFromObject(nnfNodeBlockStorage)).WithError(err).WithUserMessage("could not find storage allocation").WithMajor() + } + + devices = 
nnfNodeBlockStorage.Status.Allocations[index].Accesses[os.Getenv("NNF_NODE_NAME")].DevicePaths + } + + for _, device := range devices { + pv := lvm.NewPhysicalVolume(ctx, device) + lvmDesc.PhysicalVolumes = append(lvmDesc.PhysicalVolumes, pv) + } + + vgName, err := volumeGroupName(ctx, c, nnfNodeStorage, index) + if err != nil { + return nil, dwsv1alpha2.NewResourceError("could not get volume group name").WithError(err).WithMajor() + } + + lvName, err := logicalVolumeName(ctx, c, nnfNodeStorage, index) + if err != nil { + return nil, dwsv1alpha2.NewResourceError("could not get logical volume name").WithError(err).WithMajor() + } + + lvmDesc.Log = log + lvmDesc.VolumeGroup = lvm.NewVolumeGroup(ctx, vgName, lvmDesc.PhysicalVolumes) + lvmDesc.LogicalVolume = lvm.NewLogicalVolume(ctx, lvName, lvmDesc.VolumeGroup) + + lvmDesc.CommandArgs.PvArgs.Create = cmdLines.PvCreate + lvmDesc.CommandArgs.PvArgs.Remove = cmdLines.PvRemove + + lvmDesc.CommandArgs.VgArgs.Create = cmdLines.VgCreate + lvmDesc.CommandArgs.VgArgs.LockStart = cmdLines.VgChange.LockStart + lvmDesc.CommandArgs.VgArgs.LockStop = cmdLines.VgChange.LockStop + lvmDesc.CommandArgs.VgArgs.Remove = cmdLines.VgRemove + + lvmDesc.CommandArgs.LvArgs.Create = cmdLines.LvCreate + lvmDesc.CommandArgs.LvArgs.Activate = cmdLines.LvChange.Activate + lvmDesc.CommandArgs.LvArgs.Deactivate = cmdLines.LvChange.Deactivate + lvmDesc.CommandArgs.LvArgs.Remove = cmdLines.LvRemove + + return &lvmDesc, nil +} + +func newMockBlockDevice(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, index int, log logr.Logger) (blockdevice.BlockDevice, error) { + blockDevice := blockdevice.MockBlockDevice{ + Log: log, + } + + return &blockDevice, nil +} + +func newBindFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, blockDevice blockdevice.BlockDevice, index int, log logr.Logger) (filesystem.FileSystem, error) { + fs := filesystem.SimpleFileSystem{} + + fs.Log = log + 
fs.BlockDevice = blockDevice + fs.Type = "none" + fs.MountTarget = "file" + fs.TempDir = fmt.Sprintf("/mnt/temp/%s-%d", nnfNodeStorage.Name, index) + + fs.CommandArgs.Mkfs = "bind" + + return &fs, nil +} + +func newGfs2FileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, cmdLines nnfv1alpha1.NnfStorageProfileCmdLines, blockDevice blockdevice.BlockDevice, index int, log logr.Logger) (filesystem.FileSystem, error) { + fs := filesystem.SimpleFileSystem{} + + fs.Log = log + fs.BlockDevice = blockDevice + fs.Type = "gfs2" + fs.MountTarget = "directory" + fs.TempDir = fmt.Sprintf("/mnt/temp/%s-%d", nnfNodeStorage.Name, index) + + fs.CommandArgs.Mkfs = cmdLines.Mkfs + fs.CommandArgs.Vars = map[string]string{ + "$CLUSTER_NAME": nnfNodeStorage.Namespace, + "$LOCK_SPACE": fmt.Sprintf("fs-%02d-%x", index, nnfNodeStorage.GetUID()[0:5]), + "$PROTOCOL": "lock_dlm", + } + + return &fs, nil +} + +func newXfsFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, cmdLines nnfv1alpha1.NnfStorageProfileCmdLines, blockDevice blockdevice.BlockDevice, index int, log logr.Logger) (filesystem.FileSystem, error) { + fs := filesystem.SimpleFileSystem{} + + fs.Log = log + fs.BlockDevice = blockDevice + fs.Type = "xfs" + fs.MountTarget = "directory" + fs.TempDir = fmt.Sprintf("/mnt/temp/%s-%d", nnfNodeStorage.Name, index) + + fs.CommandArgs.Mkfs = cmdLines.Mkfs + + return &fs, nil +} + +func newLustreFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, cmdLines nnfv1alpha1.NnfStorageProfileLustreCmdLines, blockDevice blockdevice.BlockDevice, index int, log logr.Logger) (filesystem.FileSystem, error) { + fs := filesystem.LustreFileSystem{} + + fs.Log = log + fs.BlockDevice = blockDevice + fs.Name = nnfNodeStorage.Spec.LustreStorage.FileSystemName + fs.TargetType = nnfNodeStorage.Spec.LustreStorage.TargetType + fs.MgsAddress = nnfNodeStorage.Spec.LustreStorage.MgsAddress + 
fs.Index = nnfNodeStorage.Spec.LustreStorage.StartIndex + index + fs.BackFs = nnfNodeStorage.Spec.LustreStorage.BackFs + + fs.CommandArgs.Mkfs = cmdLines.Mkfs + + return &fs, nil +} + +func newMockFileSystem(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, blockDevice blockdevice.BlockDevice, index int, log logr.Logger) (filesystem.FileSystem, error) { + path := os.Getenv("MOCK_FILE_SYSTEM_PATH") + if len(path) == 0 { + path = "/mnt/filesystems" + } + + fs := filesystem.MockFileSystem{ + Log: log, + Path: fmt.Sprintf("/%s/%s-%d", path, nnfNodeStorage.GetName(), index), + } + + return &fs, nil +} + +func volumeGroupName(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, index int) (string, error) { + labels := nnfNodeStorage.GetLabels() + + workflowUid, ok := labels[dwsv1alpha2.WorkflowUidLabel] + if !ok { + return "", fmt.Errorf("missing Workflow UID label on NnfNodeStorage") + } + directiveIndex, ok := labels[nnfv1alpha1.DirectiveIndexLabel] + if !ok { + return "", fmt.Errorf("missing directive index label on NnfNodeStorage") + } + + return fmt.Sprintf("%s_%s_%d", workflowUid, directiveIndex, index), nil +} + +func logicalVolumeName(ctx context.Context, c client.Client, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, index int) (string, error) { + // For now just return "lv" as the lv name. 
If we end up sharing a volume group for multiple lvs, then + // this name needs to be something unique + return "lv", nil +} diff --git a/internal/controller/integration_test.go b/internal/controller/integration_test.go index 7d180da38..3d8aeec29 100644 --- a/internal/controller/integration_test.go +++ b/internal/controller/integration_test.go @@ -269,137 +269,139 @@ var _ = Describe("Integration Test", func() { BeforeEach(func() { + // Initialize node names - currently set to three to satisify the lustre requirement of single MDT, MGT, OST + // NOTE: Node names require the "rabbit" prefix to ensure client mounts occur on the correct controller + nodeNames = []string{ + "rabbit-test-node-0", + "rabbit-test-node-1", + "rabbit-test-node-2", + } + setup.Do(func() { + for _, nodeName := range nodeNames { + // Create the namespace + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + }} - // Initialize node names - currently set to three to satisify the lustre requirement of single MDT, MGT, OST - // NOTE: Node names require the "rabbit" prefix to ensure client mounts occur on the correct controller - nodeNames = []string{ - "rabbit-test-node-0", - "rabbit-test-node-1", - "rabbit-test-node-2", + Expect(k8sClient.Create(context.TODO(), ns)).To(Succeed()) } + }) // once - // Build the config map that ties everything together; this also - // creates a namespace for each compute node which is required for - // client mount. 
- computeNameGeneratorFunc := func() func() []dwsv1alpha2.SystemConfigurationComputeNodeReference { - nextComputeIndex := 0 - return func() []dwsv1alpha2.SystemConfigurationComputeNodeReference { - computes := make([]dwsv1alpha2.SystemConfigurationComputeNodeReference, 16) - for i := 0; i < 16; i++ { - name := fmt.Sprintf("compute%d", i+nextComputeIndex) - - computes[i].Name = name - computes[i].Index = i - } - nextComputeIndex += 16 - return computes + // Build the config map that ties everything together; this also + // creates a namespace for each compute node which is required for + // client mount. + computeNameGeneratorFunc := func() func() []dwsv1alpha2.SystemConfigurationComputeNodeReference { + nextComputeIndex := 0 + return func() []dwsv1alpha2.SystemConfigurationComputeNodeReference { + computes := make([]dwsv1alpha2.SystemConfigurationComputeNodeReference, 16) + for i := 0; i < 16; i++ { + name := fmt.Sprintf("compute%d", i+nextComputeIndex) + + computes[i].Name = name + computes[i].Index = i } + nextComputeIndex += 16 + return computes } + } - generator := computeNameGeneratorFunc() - configSpec := dwsv1alpha2.SystemConfigurationSpec{} - for _, nodeName := range nodeNames { - storageNode := dwsv1alpha2.SystemConfigurationStorageNode{ - Type: "Rabbit", - Name: nodeName, - } - - storageNode.ComputesAccess = generator() - configSpec.StorageNodes = append(configSpec.StorageNodes, storageNode) - for _, computeAccess := range storageNode.ComputesAccess { - compute := dwsv1alpha2.SystemConfigurationComputeNode{Name: computeAccess.Name} - configSpec.ComputeNodes = append(configSpec.ComputeNodes, compute) - } + generator := computeNameGeneratorFunc() + configSpec := dwsv1alpha2.SystemConfigurationSpec{} + for _, nodeName := range nodeNames { + storageNode := dwsv1alpha2.SystemConfigurationStorageNode{ + Type: "Rabbit", + Name: nodeName, } - config := &dwsv1alpha2.SystemConfiguration{ - ObjectMeta: metav1.ObjectMeta{ - Name: "default", - Namespace: 
corev1.NamespaceDefault, - }, - Spec: configSpec, + storageNode.ComputesAccess = generator() + configSpec.StorageNodes = append(configSpec.StorageNodes, storageNode) + for _, computeAccess := range storageNode.ComputesAccess { + compute := dwsv1alpha2.SystemConfigurationComputeNode{Name: computeAccess.Name} + configSpec.ComputeNodes = append(configSpec.ComputeNodes, compute) } + } - Expect(k8sClient.Create(context.TODO(), config)).To(Succeed()) + config := &dwsv1alpha2.SystemConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: corev1.NamespaceDefault, + }, + Spec: configSpec, + } - // Each node gets a namespace, a node, and an NNF Node. Node would typically be handled - // by kubernetes and then an NNF Node & Namespace are started by the NLC; but for test - // we have to bootstrap all that. - for _, nodeName := range nodeNames { - // Create the namespace - ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ - Name: nodeName, - }} + Expect(k8sClient.Create(context.TODO(), config)).To(Succeed()) - Expect(k8sClient.Create(context.TODO(), ns)).To(Succeed()) + // Each node gets a namespace, a node, and an NNF Node. Node would typically be handled + // by kubernetes and then an NNF Node & Namespace are started by the NLC; but for test + // we have to bootstrap all that. 
- // Create the node - set it to up as ready - node := &corev1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: nodeName, - Namespace: corev1.NamespaceDefault, - Labels: map[string]string{ - "cray.nnf.node": "true", - }, + for _, nodeName := range nodeNames { + // Create the node - set it to up as ready + node := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + Namespace: corev1.NamespaceDefault, + Labels: map[string]string{ + "cray.nnf.node": "true", }, - Status: corev1.NodeStatus{ - Conditions: []corev1.NodeCondition{ - { - Status: corev1.ConditionTrue, - Type: corev1.NodeReady, - }, + }, + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ + { + Status: corev1.ConditionTrue, + Type: corev1.NodeReady, }, }, - } + }, + } - Expect(k8sClient.Create(context.TODO(), node)).To(Succeed()) + Expect(k8sClient.Create(context.TODO(), node)).To(Succeed()) - // Create the NNF Node resource - nnfNode := &nnfv1alpha1.NnfNode{ - ObjectMeta: metav1.ObjectMeta{ - Name: "nnf-nlc", - Namespace: nodeName, - }, - Spec: nnfv1alpha1.NnfNodeSpec{ - Name: nodeName, - State: nnfv1alpha1.ResourceEnable, - }, - Status: nnfv1alpha1.NnfNodeStatus{}, - } + // Create the NNF Node resource + nnfNode := &nnfv1alpha1.NnfNode{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nnf-nlc", + Namespace: nodeName, + }, + Spec: nnfv1alpha1.NnfNodeSpec{ + Name: nodeName, + State: nnfv1alpha1.ResourceEnable, + }, + Status: nnfv1alpha1.NnfNodeStatus{}, + } - Expect(k8sClient.Create(context.TODO(), nnfNode)).To(Succeed()) + Expect(k8sClient.Create(context.TODO(), nnfNode)).To(Succeed()) - // Create the DWS Storage resource - storage := &dwsv1alpha2.Storage{ - ObjectMeta: metav1.ObjectMeta{ - Name: nodeName, - Namespace: corev1.NamespaceDefault, - }, - } + // Create the DWS Storage resource + storage := &dwsv1alpha2.Storage{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + Namespace: corev1.NamespaceDefault, + }, + } - Expect(k8sClient.Create(context.TODO(), storage)).To(Succeed()) + 
Expect(k8sClient.Create(context.TODO(), storage)).To(Succeed()) - // Check that the DWS storage resource was updated with the compute node information + // Check that the DWS storage resource was updated with the compute node information - Eventually(func() error { - return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storage), storage) - }).Should(Succeed()) + Eventually(func() error { + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storage), storage) + }).Should(Succeed()) - Eventually(func() bool { - Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storage), storage)).To(Succeed()) - return len(storage.Status.Access.Computes) == 16 - }).Should(BeTrue()) + Eventually(func() bool { + Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storage), storage)).To(Succeed()) + return len(storage.Status.Access.Computes) == 16 + }).Should(BeTrue()) - // Check that a namespace was created for each compute node - for i := 0; i < len(nodeNames)*16; i++ { - namespace := &corev1.Namespace{} - Eventually(func() error { - return k8sClient.Get(context.TODO(), types.NamespacedName{Name: fmt.Sprintf("compute%d", i)}, namespace) - }).Should(Succeed()) - } + // Check that a namespace was created for each compute node + for i := 0; i < len(nodeNames)*16; i++ { + namespace := &corev1.Namespace{} + Eventually(func() error { + return k8sClient.Get(context.TODO(), types.NamespacedName{Name: fmt.Sprintf("compute%d", i)}, namespace) + }).Should(Succeed()) } - }) // once + } // Create a default NnfStorageProfile for the unit tests. storageProfile = createBasicDefaultNnfStorageProfile() @@ -423,6 +425,58 @@ var _ = Describe("Integration Test", func() { Eventually(func() error { // Delete can still return the cached object. 
Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storageProfile), profExpected) }).ShouldNot(Succeed()) + + for _, nodeName := range nodeNames { + storage := &dwsv1alpha2.Storage{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + Namespace: corev1.NamespaceDefault, + }, + } + Expect(k8sClient.Delete(context.TODO(), storage)).To(Succeed()) + tempStorage := &dwsv1alpha2.Storage{} + Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storage), tempStorage) + }).ShouldNot(Succeed()) + + nnfNode := &nnfv1alpha1.NnfNode{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nnf-nlc", + Namespace: nodeName, + }, + } + Expect(k8sClient.Delete(context.TODO(), nnfNode)).To(Succeed()) + tempNnfNode := &nnfv1alpha1.NnfNode{} + Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(nnfNode), tempNnfNode) + }).ShouldNot(Succeed()) + + node := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + Namespace: corev1.NamespaceDefault, + }, + } + Expect(k8sClient.Delete(context.TODO(), node)).To(Succeed()) + tempNode := &corev1.Node{} + Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(node), tempNode) + }).ShouldNot(Succeed()) + + } + + config := &dwsv1alpha2.SystemConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: corev1.NamespaceDefault, + }, + } + + Expect(k8sClient.Delete(context.TODO(), config)).To(Succeed()) + tempConfig := &dwsv1alpha2.SystemConfiguration{} + Eventually(func() error { // Delete can still return the cached object. 
Wait until the object is no longer present + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(config), tempConfig) + }).ShouldNot(Succeed()) }) It("Testing DWS directives", func() { @@ -1493,7 +1547,7 @@ var _ = Describe("Integration Test", func() { nnfstorage := &nnfv1alpha1.NnfStorage{} Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(dbdServer), nnfstorage)).To(Succeed()) for _, comp := range nnfstorage.Spec.AllocationSets { - Expect(comp.ExternalMgsNid).To(Equal(desiredNid)) + Expect(comp.MgsAddress).To(Equal(desiredNid)) } } diff --git a/internal/controller/metrics/metrics.go b/internal/controller/metrics/metrics.go index bee4e340a..6854ebadb 100644 --- a/internal/controller/metrics/metrics.go +++ b/internal/controller/metrics/metrics.go @@ -55,6 +55,13 @@ var ( }, ) + NnfNodeBlockStorageReconcilesTotal = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "nnf_node_block_storage_reconciles_total", + Help: "Number of total reconciles in nnf_node_block_storage controller", + }, + ) + NnfPersistentStorageReconcilesTotal = prometheus.NewCounter( prometheus.CounterOpts{ Name: "nnf_persistent_storage_reconciles_total", @@ -92,6 +99,7 @@ func init() { metrics.Registry.MustRegister(NnfNodeReconcilesTotal) metrics.Registry.MustRegister(NnfNodeECDataReconcilesTotal) metrics.Registry.MustRegister(NnfNodeStorageReconcilesTotal) + metrics.Registry.MustRegister(NnfNodeBlockStorageReconcilesTotal) metrics.Registry.MustRegister(NnfPersistentStorageReconcilesTotal) metrics.Registry.MustRegister(NnfServersReconcilesTotal) metrics.Registry.MustRegister(NnfStorageReconcilesTotal) diff --git a/internal/controller/nnf_access_controller.go b/internal/controller/nnf_access_controller.go index b1dadc3b4..871bf4ff8 100644 --- a/internal/controller/nnf_access_controller.go +++ b/internal/controller/nnf_access_controller.go @@ -30,6 +30,7 @@ import ( "time" "github.com/go-logr/logr" + "golang.org/x/exp/slices" "golang.org/x/sync/errgroup" corev1 
"k8s.io/api/core/v1" @@ -129,7 +130,7 @@ func (r *NnfAccessReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return ctrl.Result{RequeueAfter: time.Second}, nil } - err = r.removeNodeStorageEndpoints(ctx, access, storageMapping) + err = r.removeBlockStorageAccess(ctx, access, storageMapping) if err != nil { return ctrl.Result{}, err } @@ -216,7 +217,7 @@ func (r *NnfAccessReconciler) mount(ctx context.Context, access *nnfv1alpha1.Nnf } // Add compute node information to the storage map, if necessary. - err = r.addNodeStorageEndpoints(ctx, access, storageMapping) + err = r.addBlockStorageAccess(ctx, access, storageMapping) if err != nil { if apierrors.IsConflict(err) { return &ctrl.Result{}, nil @@ -235,7 +236,7 @@ func (r *NnfAccessReconciler) mount(ctx context.Context, access *nnfv1alpha1.Nnf return nil, dwsv1alpha2.NewResourceError("unable to create ClientMount resources").WithError(err) } - ready, err := r.getNodeStorageEndpointStatus(ctx, access, storageMapping) + ready, err := r.getBlockStorageAccessStatus(ctx, access, storageMapping) if err != nil { return nil, dwsv1alpha2.NewResourceError("unable to check endpoints for NnfNodeStorage").WithError(err) } @@ -276,7 +277,7 @@ func (r *NnfAccessReconciler) unmount(ctx context.Context, access *nnfv1alpha1.N return &ctrl.Result{RequeueAfter: time.Second}, nil } - err = r.removeNodeStorageEndpoints(ctx, access, storageMapping) + err = r.removeBlockStorageAccess(ctx, access, storageMapping) if err != nil { return nil, dwsv1alpha2.NewResourceError("unable to remove NnfNodeStorage endpoints").WithError(err) } @@ -450,7 +451,7 @@ func (r *NnfAccessReconciler) getClientListFromStorageReference(ctx context.Cont clients := []string{} for _, allocationSetSpec := range nnfStorage.Spec.AllocationSets { if nnfStorage.Spec.FileSystemType == "lustre" { - if allocationSetSpec.NnfStorageLustreSpec.TargetType != "OST" { + if allocationSetSpec.NnfStorageLustreSpec.TargetType != "ost" { continue } } @@ -507,12 +508,6 @@ 
func (r *NnfAccessReconciler) mapClientStorage(ctx context.Context, access *nnfv // mount information func (r *NnfAccessReconciler) mapClientNetworkStorage(ctx context.Context, access *nnfv1alpha1.NnfAccess, clients []string, nnfStorage *nnfv1alpha1.NnfStorage, setIndex int) (map[string][]dwsv1alpha2.ClientMountInfo, error) { allocationSet := nnfStorage.Spec.AllocationSets[setIndex] - - if allocationSet.ExternalMgsNid == "" && allocationSet.TargetType != "MGT" && allocationSet.TargetType != "MGTMDT" { - // Look elsewhere for the MGS NID. - return nil, nil - } - storageMapping := make(map[string][]dwsv1alpha2.ClientMountInfo) for _, client := range clients { @@ -523,11 +518,7 @@ func (r *NnfAccessReconciler) mapClientNetworkStorage(ctx context.Context, acces mountInfo.Device.Type = dwsv1alpha2.ClientMountDeviceTypeLustre mountInfo.Device.Lustre = &dwsv1alpha2.ClientMountDeviceLustre{} mountInfo.Device.Lustre.FileSystemName = allocationSet.FileSystemName - if allocationSet.ExternalMgsNid != "" { - mountInfo.Device.Lustre.MgsAddresses = allocationSet.ExternalMgsNid - } else { - mountInfo.Device.Lustre.MgsAddresses = nnfStorage.Status.MgsNode - } + mountInfo.Device.Lustre.MgsAddresses = nnfStorage.Status.MgsAddress // Make it easy for the nnf-dm daemon to find the NnfStorage. 
mountInfo.Device.DeviceReference = &dwsv1alpha2.ClientMountDeviceReference{ @@ -644,13 +635,6 @@ func (r *NnfAccessReconciler) mapClientLocalStorage(ctx context.Context, access mountInfo.Device.LVM.VolumeGroup = nnfNodeStorage.Status.Allocations[i].VolumeGroup mountInfo.Device.LVM.LogicalVolume = nnfNodeStorage.Status.Allocations[i].LogicalVolume mountInfo.Device.LVM.DeviceType = dwsv1alpha2.ClientMountLVMDeviceTypeNVMe - for _, nvme := range nnfNodeStorage.Status.Allocations[i].NVMeList { - nvmeDesc := dwsv1alpha2.ClientMountNVMeDesc{} - nvmeDesc.DeviceSerial = nvme.DeviceSerial - nvmeDesc.NamespaceID = nvme.NamespaceID - nvmeDesc.NamespaceGUID = nvme.NamespaceGUID - mountInfo.Device.LVM.NVMeInfo = append(mountInfo.Device.LVM.NVMeInfo, nvmeDesc) - } } existingStorage[nnfNodeStorage.Namespace] = append(existingStorage[nnfNodeStorage.Namespace], mountInfo) @@ -724,7 +708,7 @@ type mountReference struct { // addNodeStorageEndpoints adds the compute node information to the NnfNodeStorage resource // so it can make the NVMe namespaces accessible on the compute node. This is done on the rabbit // by creating StorageGroup resources through swordfish for the correct endpoint. -func (r *NnfAccessReconciler) addNodeStorageEndpoints(ctx context.Context, access *nnfv1alpha1.NnfAccess, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) error { +func (r *NnfAccessReconciler) addBlockStorageAccess(ctx context.Context, access *nnfv1alpha1.NnfAccess, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) error { // NnfNodeStorage clientReferences only need to be added for compute nodes. If // this nnfAccess is not for compute nodes, then there's no work to do. 
if access.Spec.ClientReference == (corev1.ObjectReference{}) { @@ -754,43 +738,44 @@ func (r *NnfAccessReconciler) addNodeStorageEndpoints(ctx context.Context, acces } } - // Loop through the NnfNodeStorages and add clientEndpoint information for each of the + // Loop through the NnfNodeStorages and add client access information for each of the // computes that need access to an allocation. - for nodeStorageReference, mountRefList := range nodeStorageMap { + for nodeBlockStorageReference, mountRefList := range nodeStorageMap { namespacedName := types.NamespacedName{ - Name: nodeStorageReference.Name, - Namespace: nodeStorageReference.Namespace, + Name: nodeBlockStorageReference.Name, + Namespace: nodeBlockStorageReference.Namespace, } - nnfNodeStorage := &nnfv1alpha1.NnfNodeStorage{} - err := r.Get(ctx, namespacedName, nnfNodeStorage) + nnfNodeBlockStorage := &nnfv1alpha1.NnfNodeBlockStorage{} + err := r.Get(ctx, namespacedName, nnfNodeBlockStorage) if err != nil { return err } - oldNnfNodeStorage := *nnfNodeStorage.DeepCopy() - + oldNnfNodeBlockStorage := *nnfNodeBlockStorage.DeepCopy() // The clientEndpoints field is an array of each of the allocations on the Rabbit // node that holds a list of the endpoints to expose the allocation to. The endpoints // are the swordfish endpoints, so 0 is the rabbit, and 1-16 are the computes. Start out // by clearing all compute node endpoints from the allocations. - for i := range nnfNodeStorage.Spec.ClientEndpoints { - nnfNodeStorage.Spec.ClientEndpoints[i].NodeNames = nnfNodeStorage.Spec.ClientEndpoints[i].NodeNames[:1] + for i := range nnfNodeBlockStorage.Spec.Allocations { + nnfNodeBlockStorage.Spec.Allocations[i].Access = []string{nnfNodeBlockStorage.Namespace} } // Add compute node endpoints for each of the allocations. Increment the compute node // index found from the "storage" resource to account for the 0 index being the rabbit // in swordfish. 
for _, mountRef := range mountRefList { - clientEndpoints := &nnfNodeStorage.Spec.ClientEndpoints[mountRef.allocationIndex].NodeNames - *clientEndpoints = append(*clientEndpoints, mountRef.client) + // Add the client name to the access list if it's not already there + if slices.IndexFunc(nnfNodeBlockStorage.Spec.Allocations[mountRef.allocationIndex].Access, func(n string) bool { return n == mountRef.client }) < 0 { + nnfNodeBlockStorage.Spec.Allocations[mountRef.allocationIndex].Access = append(nnfNodeBlockStorage.Spec.Allocations[mountRef.allocationIndex].Access, mountRef.client) + } } - if reflect.DeepEqual(oldNnfNodeStorage, *nnfNodeStorage) { + if reflect.DeepEqual(oldNnfNodeBlockStorage, *nnfNodeBlockStorage) { continue } - if err = r.Update(ctx, nnfNodeStorage); err != nil { + if err = r.Update(ctx, nnfNodeBlockStorage); err != nil { return err } } @@ -798,7 +783,7 @@ func (r *NnfAccessReconciler) addNodeStorageEndpoints(ctx context.Context, acces return nil } -func (r *NnfAccessReconciler) getNodeStorageEndpointStatus(ctx context.Context, access *nnfv1alpha1.NnfAccess, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) (bool, error) { +func (r *NnfAccessReconciler) getBlockStorageAccessStatus(ctx context.Context, access *nnfv1alpha1.NnfAccess, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) (bool, error) { // NnfNodeStorage clientReferences only need to be checked for compute nodes. If // this nnfAccess is not for compute nodes, then there's no work to do. if access.Spec.ClientReference == (corev1.ObjectReference{}) { @@ -850,14 +835,14 @@ func (r *NnfAccessReconciler) getNodeStorageEndpointStatus(ctx context.Context, // removeNodeStorageEndpoints modifies the NnfNodeStorage resources to remove the client endpoints for the // compute nodes that had mounted the storage. This causes NnfNodeStorage to remove the StorageGroups for // those compute nodes and remove access to the NVMe namespaces from the computes. 
-func (r *NnfAccessReconciler) removeNodeStorageEndpoints(ctx context.Context, access *nnfv1alpha1.NnfAccess, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) error { +func (r *NnfAccessReconciler) removeBlockStorageAccess(ctx context.Context, access *nnfv1alpha1.NnfAccess, storageMapping map[string][]dwsv1alpha2.ClientMountInfo) error { // NnfNodeStorage clientReferences only need to be removed for compute nodes. If // this nnfAccess is not for compute nodes, then there's no work to do. if access.Spec.ClientReference == (corev1.ObjectReference{}) { return nil } - nodeStorageMap := make(map[corev1.ObjectReference]bool) + nodeBlockStorageMap := make(map[corev1.ObjectReference]bool) // Make a map of NnfNodeStorage references that were mounted by this // nnfAccess @@ -867,25 +852,25 @@ func (r *NnfAccessReconciler) removeNodeStorageEndpoints(ctx context.Context, ac continue } - if mount.Device.DeviceReference.ObjectReference.Kind != reflect.TypeOf(nnfv1alpha1.NnfNodeStorage{}).Name() { + if mount.Device.DeviceReference.ObjectReference.Kind != reflect.TypeOf(nnfv1alpha1.NnfNodeBlockStorage{}).Name() { continue } - nodeStorageMap[mount.Device.DeviceReference.ObjectReference] = true + nodeBlockStorageMap[mount.Device.DeviceReference.ObjectReference] = true } } - // Update each of the NnfNodeStorage resources to remove the clientEndpoints that - // were added earlier. Leave the first endpoint since that corresponds to the + // Update each of the NnfNodeBlockStorage resources to remove the access that + // was added earlier. Leave the first entry since that corresponds to the // rabbit node. 
- for nodeStorageReference := range nodeStorageMap { + for nodeBlockStorageReference := range nodeBlockStorageMap { namespacedName := types.NamespacedName{ - Name: nodeStorageReference.Name, - Namespace: nodeStorageReference.Namespace, + Name: nodeBlockStorageReference.Name, + Namespace: nodeBlockStorageReference.Namespace, } - nnfNodeStorage := &nnfv1alpha1.NnfNodeStorage{} - err := r.Get(ctx, namespacedName, nnfNodeStorage) + nnfNodeBlockStorage := &nnfv1alpha1.NnfNodeBlockStorage{} + err := r.Get(ctx, namespacedName, nnfNodeBlockStorage) if err != nil { if apierrors.IsNotFound(err) { continue @@ -893,17 +878,16 @@ func (r *NnfAccessReconciler) removeNodeStorageEndpoints(ctx context.Context, ac return err } - oldNnfNodeStorage := *nnfNodeStorage.DeepCopy() + oldNnfNodeBlockStorage := *nnfNodeBlockStorage.DeepCopy() - for i := range nnfNodeStorage.Spec.ClientEndpoints { - nnfNodeStorage.Spec.ClientEndpoints[i].NodeNames = nnfNodeStorage.Spec.ClientEndpoints[i].NodeNames[:1] + for i := range nnfNodeBlockStorage.Spec.Allocations { + nnfNodeBlockStorage.Spec.Allocations[i].Access = nnfNodeBlockStorage.Spec.Allocations[i].Access[:1] } - - if reflect.DeepEqual(oldNnfNodeStorage, *nnfNodeStorage) { + if reflect.DeepEqual(oldNnfNodeBlockStorage, *nnfNodeBlockStorage) { continue } - err = r.Update(ctx, nnfNodeStorage) + err = r.Update(ctx, nnfNodeBlockStorage) if err != nil { return err } diff --git a/internal/controller/nnf_access_controller_test.go b/internal/controller/nnf_access_controller_test.go index 2dfbc669f..8e44a7c99 100644 --- a/internal/controller/nnf_access_controller_test.go +++ b/internal/controller/nnf_access_controller_test.go @@ -22,6 +22,7 @@ package controller import ( "context" "reflect" + "sync" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -41,11 +42,129 @@ var _ = Describe("Access Controller Test", func() { "rabbit-nnf-access-test-node-1", "rabbit-nnf-access-test-node-2"} + nnfNodes := [2]*nnfv1alpha1.NnfNode{} + storages := [2]*dwsv1alpha2.Storage{} + nodes := [2]*corev1.Node{} + + var systemConfiguration *dwsv1alpha2.SystemConfiguration + var storageProfile *nnfv1alpha1.NnfStorageProfile + var setup sync.Once + BeforeEach(func() { - for _, nodeName := range nodeNames { - ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} - Expect(k8sClient.Create(context.TODO(), ns)).To(Succeed(), "Create Namespace") + setup.Do(func() { + for _, nodeName := range nodeNames { + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} + Expect(k8sClient.Create(context.TODO(), ns)).To(Succeed(), "Create Namespace") + } + }) + + systemConfiguration = &dwsv1alpha2.SystemConfiguration{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: corev1.NamespaceDefault, + }, + Spec: dwsv1alpha2.SystemConfigurationSpec{ + StorageNodes: []dwsv1alpha2.SystemConfigurationStorageNode{ + dwsv1alpha2.SystemConfigurationStorageNode{ + Type: "Rabbit", + Name: "rabbit-nnf-access-test-node-1", + }, + dwsv1alpha2.SystemConfigurationStorageNode{ + Type: "Rabbit", + Name: "rabbit-nnf-access-test-node-2", + }, + }, + }, } + Expect(k8sClient.Create(context.TODO(), systemConfiguration)).To(Succeed()) + + for i, nodeName := range nodeNames { + // Create the node - set it to up as ready + nodes[i] = &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + Namespace: corev1.NamespaceDefault, + Labels: map[string]string{ + "cray.nnf.node": "true", + }, + }, + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ + { + Status: corev1.ConditionTrue, + Type: corev1.NodeReady, + }, + }, + }, + } + + Expect(k8sClient.Create(context.TODO(), nodes[i])).To(Succeed()) + + nnfNodes[i] = &nnfv1alpha1.NnfNode{ + TypeMeta: 
metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "nnf-nlc", + Namespace: nodeName, + }, + Spec: nnfv1alpha1.NnfNodeSpec{ + State: nnfv1alpha1.ResourceEnable, + }, + } + Expect(k8sClient.Create(context.TODO(), nnfNodes[i])).To(Succeed()) + + Eventually(func(g Gomega) error { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(nnfNodes[i]), nnfNodes[i])).To(Succeed()) + nnfNodes[i].Status.LNetNid = "1.2.3.4@tcp0" + return k8sClient.Update(context.TODO(), nnfNodes[i]) + }).Should(Succeed(), "set LNet Nid in NnfNode") + + storages[i] = &dwsv1alpha2.Storage{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + Namespace: corev1.NamespaceDefault, + }, + } + + Expect(k8sClient.Create(context.TODO(), storages[i])).To(Succeed()) + } + + // Create a default NnfStorageProfile for the unit tests. + storageProfile = createBasicDefaultNnfStorageProfile() + }) + + AfterEach(func() { + Expect(k8sClient.Delete(context.TODO(), storageProfile)).To(Succeed()) + profExpected := &nnfv1alpha1.NnfStorageProfile{} + Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storageProfile), profExpected) + }).ShouldNot(Succeed()) + + for i := range nodeNames { + Expect(k8sClient.Delete(context.TODO(), storages[i])).To(Succeed()) + tempStorage := &dwsv1alpha2.Storage{} + Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storages[i]), tempStorage) + }).ShouldNot(Succeed()) + + Expect(k8sClient.Delete(context.TODO(), nnfNodes[i])).To(Succeed()) + tempNnfNode := &nnfv1alpha1.NnfNode{} + Eventually(func() error { // Delete can still return the cached object. 
Wait until the object is no longer present + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(nnfNodes[i]), tempNnfNode) + }).ShouldNot(Succeed()) + + Expect(k8sClient.Delete(context.TODO(), nodes[i])).To(Succeed()) + tempNode := &corev1.Node{} + Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(nodes[i]), tempNode) + }).ShouldNot(Succeed()) + } + + Expect(k8sClient.Delete(context.TODO(), systemConfiguration)).To(Succeed()) + tempConfig := &dwsv1alpha2.SystemConfiguration{} + Eventually(func() error { // Delete can still return the cached object. Wait until the object is no longer present + return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(systemConfiguration), tempConfig) + }).ShouldNot(Succeed()) }) Describe("Create Client Mounts", func() { @@ -69,37 +188,36 @@ var _ = Describe("Access Controller Test", func() { FileSystemType: "lustre", AllocationSets: []nnfv1alpha1.NnfStorageAllocationSetSpec{ { - Name: "mgtmdt", + Name: "mgtmdt", + Capacity: 50000000000, NnfStorageLustreSpec: nnfv1alpha1.NnfStorageLustreSpec{ - FileSystemName: "MGTMDT", - TargetType: "MGTMDT", + FileSystemName: "mgtmdt", + TargetType: "mgtmdt", }, Nodes: []nnfv1alpha1.NnfStorageAllocationNodes{ { Count: 1, - Name: corev1.NamespaceDefault, + Name: nodeNames[0], }, }, }, { - Name: "ost", + Name: "ost", + Capacity: 50000000000, NnfStorageLustreSpec: nnfv1alpha1.NnfStorageLustreSpec{ - FileSystemName: "OST", - TargetType: "OST", + FileSystemName: "ost", + TargetType: "ost", }, Nodes: allocationNodes, }, }, }, - Status: nnfv1alpha1.NnfStorageStatus{ - MgsNode: "127.0.0.1@tcp", - }, } Expect(k8sClient.Create(context.TODO(), storage)).To(Succeed(), "Create NNF Storage") Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storage), storage)).To(Succeed()) - storage.Status.MgsNode = "127.0.0.1@tcp" 
+ storage.Status.MgsAddress = "127.0.0.1@tcp" return k8sClient.Status().Update(context.TODO(), storage) }).Should(Succeed()) @@ -123,6 +241,8 @@ var _ = Describe("Access Controller Test", func() { }, } + addPinnedStorageProfileLabel(access, storageProfile) + Expect(k8sClient.Create(context.TODO(), access)).To(Succeed(), "Create NNF Access") By("Verify NNF Access goes Ready in mounted state") diff --git a/internal/controller/nnf_clientmount_controller.go b/internal/controller/nnf_clientmount_controller.go index 8e483777d..3df6d761e 100644 --- a/internal/controller/nnf_clientmount_controller.go +++ b/internal/controller/nnf_clientmount_controller.go @@ -27,21 +27,18 @@ import ( "time" "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kruntime "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/mount-utils" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/predicate" - nnf "github.com/NearNodeFlash/nnf-ec/pkg/manager-nnf" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/updater" - sf "github.com/NearNodeFlash/nnf-ec/pkg/rfsf/pkg/models" nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" ) @@ -63,6 +60,7 @@ type NnfClientMountReconciler struct { //+kubebuilder:rbac:groups=dataworkflowservices.github.io,resources=clientmounts,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=dataworkflowservices.github.io,resources=clientmounts/status,verbs=get;update;patch //+kubebuilder:rbac:groups=dataworkflowservices.github.io,resources=clientmounts/finalizers,verbs=update 
+//+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfstorageprofiles,verbs=get;list;watch // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. @@ -84,17 +82,6 @@ func (r *NnfClientMountReconciler) Reconcile(ctx context.Context, req ctrl.Reque defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() defer func() { clientMount.Status.SetResourceErrorAndLog(err, log) }() - // Ensure the NNF Storage Service is running prior to taking any action. - ss := nnf.NewDefaultStorageService() - storageService := &sf.StorageServiceV150StorageService{} - if err := ss.StorageServiceIdGet(ss.Id(), storageService); err != nil { - return ctrl.Result{}, dwsv1alpha2.NewResourceError("unable to get redfish storage service status").WithError(err).WithMajor() - } - - if storageService.Status.State != sf.ENABLED_RST { - return ctrl.Result{RequeueAfter: 1 * time.Second}, nil - } - // Handle cleanup if the resource is being deleted if !clientMount.GetDeletionTimestamp().IsZero() { if !controllerutil.ContainsFinalizer(clientMount, finalizerNnfClientMount) { @@ -163,17 +150,15 @@ func (r *NnfClientMountReconciler) Reconcile(ctx context.Context, req ctrl.Reque // changeMmountAll mounts or unmounts all the file systems listed in the spec.Mounts list func (r *NnfClientMountReconciler) changeMountAll(ctx context.Context, clientMount *dwsv1alpha2.ClientMount, state dwsv1alpha2.ClientMountState) error { - log := r.Log.WithValues("ClientMount", types.NamespacedName{Name: clientMount.Name, Namespace: clientMount.Namespace}) - var firstError error - for i, mount := range clientMount.Spec.Mounts { + for i := range clientMount.Spec.Mounts { var err error switch state { case dwsv1alpha2.ClientMountStateMounted: - err = r.changeMount(ctx, mount, true, log) + err = r.changeMount(ctx, clientMount, i, true) case dwsv1alpha2.ClientMountStateUnmounted: - err = r.changeMount(ctx, 
mount, false, log) + err = r.changeMount(ctx, clientMount, i, false) default: return dwsv1alpha2.NewResourceError("invalid desired state %s", state).WithFatal() } @@ -192,138 +177,80 @@ func (r *NnfClientMountReconciler) changeMountAll(ctx context.Context, clientMou } // changeMount mount or unmounts a single mount point described in the ClientMountInfo object -func (r *NnfClientMountReconciler) changeMount(ctx context.Context, clientMountInfo dwsv1alpha2.ClientMountInfo, shouldMount bool, log logr.Logger) error { - - if os.Getenv("ENVIRONMENT") == "kind" { - if shouldMount { - if err := os.MkdirAll(clientMountInfo.MountPath, 0755); err != nil { - return dwsv1alpha2.NewResourceError("make directory failed: %s", clientMountInfo.MountPath).WithError(err).WithMajor() - } - - log.Info("Fake mounted file system", "Mount path", clientMountInfo.MountPath) - } else { - // Return if the directory was already removed - if _, err := os.Stat(clientMountInfo.MountPath); os.IsNotExist(err) { - return nil - } - - if err := os.RemoveAll(clientMountInfo.MountPath); err != nil { - return dwsv1alpha2.NewResourceError("remove directory failed: %s", clientMountInfo.MountPath).WithError(err).WithMajor() - } +func (r *NnfClientMountReconciler) changeMount(ctx context.Context, clientMount *dwsv1alpha2.ClientMount, index int, shouldMount bool) error { + log := r.Log.WithValues("ClientMount", client.ObjectKeyFromObject(clientMount), "index", clientMount.Spec.Mounts[index].Device.DeviceReference.Data) - log.Info("Fake unmounted file system", "Mount path", clientMountInfo.MountPath) - } - - if clientMountInfo.SetPermissions { - if err := os.Chown(clientMountInfo.MountPath, int(clientMountInfo.UserID), int(clientMountInfo.GroupID)); err != nil { - return dwsv1alpha2.NewResourceError("chown failed: %s", clientMountInfo.MountPath).WithError(err).WithMajor() - } - } + clientMountInfo := clientMount.Spec.Mounts[index] + nnfNodeStorage := r.fakeNnfNodeStorage(clientMount, index) - return nil + _, 
fileSystem, err := getBlockDeviceAndFileSystem(ctx, r.Client, nnfNodeStorage, clientMountInfo.Device.DeviceReference.Data, log) + if err != nil { + return dwsv1alpha2.NewResourceError("unable to get file system information").WithError(err).WithMajor() } - switch clientMountInfo.Device.Type { - case dwsv1alpha2.ClientMountDeviceTypeLustre: - mountPath := clientMountInfo.MountPath - - _, testEnv := os.LookupEnv("NNF_TEST_ENVIRONMENT") - - var mounter mount.Interface - if testEnv { - mounter = mount.NewFakeMounter([]mount.MountPoint{}) - } else { - mounter = mount.New("") - } - - isNotMountPoint, _ := mount.IsNotMountPoint(mounter, mountPath) - - if shouldMount { - if isNotMountPoint { - - mountSource := clientMountInfo.Device.Lustre.MgsAddresses + - ":/" + - clientMountInfo.Device.Lustre.FileSystemName - - if !testEnv { - if err := os.MkdirAll(mountPath, 0755); err != nil { - return dwsv1alpha2.NewResourceError("make directory failed: %s", mountPath).WithError(err).WithMajor() - } - } - - if err := mounter.Mount(mountSource, mountPath, "lustre", nil); err != nil { - return dwsv1alpha2.NewResourceError("unable to mount file system").WithError(err).WithMajor() - } - } - } else { - if !isNotMountPoint { - if err := mounter.Unmount(mountPath); err != nil { - return dwsv1alpha2.NewResourceError("unable to unmount file system").WithError(err).WithMajor() - } - } + if shouldMount { + mounted, err := fileSystem.Mount(ctx, clientMountInfo.MountPath, clientMountInfo.Options, clientMount.Status.Mounts[index].Ready) + if err != nil { + return dwsv1alpha2.NewResourceError("unable to mount file system").WithError(err).WithMajor() } - - case dwsv1alpha2.ClientMountDeviceTypeReference: - - namespacedName := types.NamespacedName{ - Name: clientMountInfo.Device.DeviceReference.ObjectReference.Name, - Namespace: clientMountInfo.Device.DeviceReference.ObjectReference.Namespace, + if mounted { + log.Info("Mounted file system", "Mount path", clientMountInfo.MountPath) } - nodeStorage := 
&nnfv1alpha1.NnfNodeStorage{} - if err := r.Get(ctx, namespacedName, nodeStorage); err != nil { - return err + if clientMount.Spec.Mounts[index].SetPermissions { + if err := os.Chown(clientMountInfo.MountPath, int(clientMount.Spec.Mounts[index].UserID), int(clientMount.Spec.Mounts[index].GroupID)); err != nil { + return dwsv1alpha2.NewResourceError("unable to set owner and group for file system").WithError(err).WithMajor() + } } - - allocationStatus := nodeStorage.Status.Allocations[clientMountInfo.Device.DeviceReference.Data] - fileShare, err := r.getFileShare(allocationStatus.FileSystem.ID, allocationStatus.FileShare.ID) + } else { + unmounted, err := fileSystem.Unmount(ctx, clientMountInfo.MountPath) if err != nil { - return dwsv1alpha2.NewResourceError("could not get file share").WithError(err).WithMajor() + return dwsv1alpha2.NewResourceError("unable to unmount file system").WithError(err).WithMajor() } - - if shouldMount { - fileShare.FileSharePath = clientMountInfo.MountPath - } else { - fileShare.FileSharePath = "" + if unmounted { + log.Info("Unmounted file system", "Mount path", clientMountInfo.MountPath) } - - fileShare, err = r.updateFileShare(allocationStatus.FileSystem.ID, fileShare) - if err != nil { - return dwsv1alpha2.NewResourceError("could not update file share").WithError(err).WithMajor() - } - - default: - return dwsv1alpha2.NewResourceError("invalid device type %s", clientMountInfo.Device.Type).WithFatal() - } - - if shouldMount { - log.Info("Mounted file system", "Mount path", clientMountInfo.MountPath) - } else { - log.Info("Unmounted file system", "Mount path", clientMountInfo.MountPath) } return nil } -func (r *NnfClientMountReconciler) updateFileShare(fileSystemId string, fileShare *sf.FileShareV120FileShare) (*sf.FileShareV120FileShare, error) { - ss := nnf.NewDefaultStorageService() - - if err := ss.StorageServiceIdFileSystemIdExportedShareIdPut(ss.Id(), fileSystemId, fileShare.Id, fileShare); err != nil { - return nil, err +// 
fakeNnfNodeStorage creates an NnfNodeStorage resource with filled in with only the fields +// that are necessary to mount the file system. This is done to reduce the API server load +// because the compute nodes don't need to Get() the actual NnfNodeStorage. +func (r *NnfClientMountReconciler) fakeNnfNodeStorage(clientMount *dwsv1alpha2.ClientMount, index int) *nnfv1alpha1.NnfNodeStorage { + nnfNodeStorage := &nnfv1alpha1.NnfNodeStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: clientMount.Spec.Mounts[index].Device.DeviceReference.ObjectReference.Name, + Namespace: clientMount.Spec.Mounts[index].Device.DeviceReference.ObjectReference.Namespace, + }, } - return fileShare, nil -} + // These labels aren't exactly right (NnfStorage owns NnfNodeStorage), but the + // labels that are important for doing the mount are there and correct + dwsv1alpha2.InheritParentLabels(nnfNodeStorage, clientMount) -func (r *NnfClientMountReconciler) getFileShare(fileSystemId string, fileShareId string) (*sf.FileShareV120FileShare, error) { - ss := nnf.NewDefaultStorageService() - sh := &sf.FileShareV120FileShare{} + nnfNodeStorage.Spec.BlockReference = corev1.ObjectReference{ + Name: "fake", + Namespace: "fake", + Kind: "fake", + } + + nnfNodeStorage.Spec.UserID = clientMount.Spec.Mounts[index].UserID + nnfNodeStorage.Spec.GroupID = clientMount.Spec.Mounts[index].GroupID + nnfNodeStorage.Spec.FileSystemType = clientMount.Spec.Mounts[index].Type + if nnfNodeStorage.Spec.FileSystemType == "none" { + nnfNodeStorage.Spec.FileSystemType = "raw" + } - if err := ss.StorageServiceIdFileSystemIdExportedShareIdGet(ss.Id(), fileSystemId, fileShareId, sh); err != nil { - return nil, err + if clientMount.Spec.Mounts[index].Type == "lustre" { + nnfNodeStorage.Spec.LustreStorage.BackFs = "none" + nnfNodeStorage.Spec.LustreStorage.TargetType = "ost" + nnfNodeStorage.Spec.LustreStorage.FileSystemName = clientMount.Spec.Mounts[index].Device.Lustre.FileSystemName + 
nnfNodeStorage.Spec.LustreStorage.MgsAddress = clientMount.Spec.Mounts[index].Device.Lustre.MgsAddresses } - return sh, nil + return nnfNodeStorage } func filterByRabbitNamespacePrefixForTest() predicate.Predicate { diff --git a/internal/controller/nnf_node_block_storage_controller.go b/internal/controller/nnf_node_block_storage_controller.go new file mode 100644 index 000000000..c999e0c72 --- /dev/null +++ b/internal/controller/nnf_node_block_storage_controller.go @@ -0,0 +1,535 @@ +/* + * Copyright 2023 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package controller + +import ( + "context" + "fmt" + "net/http" + "os" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" + apierrors "k8s.io/apimachinery/pkg/api/errors" + kruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + ec "github.com/NearNodeFlash/nnf-ec/pkg/ec" + nnf "github.com/NearNodeFlash/nnf-ec/pkg/manager-nnf" + nnfnvme "github.com/NearNodeFlash/nnf-ec/pkg/manager-nvme" + openapi "github.com/NearNodeFlash/nnf-ec/pkg/rfsf/pkg/common" + sf "github.com/NearNodeFlash/nnf-ec/pkg/rfsf/pkg/models" + + dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + "github.com/DataWorkflowServices/dws/utils/updater" + nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + "github.com/NearNodeFlash/nnf-sos/internal/controller/metrics" + "github.com/NearNodeFlash/nnf-sos/pkg/blockdevice/nvme" +) + +const ( + // finalizerNnfNodeBlockStorage defines the key used in identifying the + // storage object as being owned by this NNF Storage Reconciler. This + // prevents the system from deleting the custom resource until the + // reconciler has finished using the resource. 
+ finalizerNnfNodeBlockStorage = "nnf.cray.hpe.com/nnf_node_block_storage" +) + +// NnfNodeBlockStorageReconciler contains the elements needed during reconciliation for NnfNodeBlockStorage +type NnfNodeBlockStorageReconciler struct { + client.Client + Log logr.Logger + Scheme *kruntime.Scheme + + types.NamespacedName +} + +//+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfnodeblockstorages,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfnodeblockstorages/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfnodeblockstorages/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.7.2/pkg/reconcile +func (r *NnfNodeBlockStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, err error) { + log := r.Log.WithValues("NnfNodeBlockStorage", req.NamespacedName) + metrics.NnfNodeBlockStorageReconcilesTotal.Inc() + + nodeBlockStorage := &nnfv1alpha1.NnfNodeBlockStorage{} + if err := r.Get(ctx, req.NamespacedName, nodeBlockStorage); err != nil { + // ignore not-found errors, since they can't be fixed by an immediate + // requeue (we'll need to wait for a new notification), and we can get them + // on deleted requests. + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // Ensure the NNF Storage Service is running prior to taking any action. 
+ ss := nnf.NewDefaultStorageService() + storageService := &sf.StorageServiceV150StorageService{} + if err := ss.StorageServiceIdGet(ss.Id(), storageService); err != nil { + return ctrl.Result{}, err + } + + if storageService.Status.State != sf.ENABLED_RST { + return ctrl.Result{RequeueAfter: 1 * time.Second}, nil + } + + statusUpdater := updater.NewStatusUpdater[*nnfv1alpha1.NnfNodeBlockStorageStatus](nodeBlockStorage) + defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() + defer func() { nodeBlockStorage.Status.SetResourceErrorAndLog(err, log) }() + + if !nodeBlockStorage.GetDeletionTimestamp().IsZero() { + if !controllerutil.ContainsFinalizer(nodeBlockStorage, finalizerNnfNodeBlockStorage) { + return ctrl.Result{}, nil + } + + for i := range nodeBlockStorage.Status.Allocations { + // Release physical storage + result, err := r.deleteStorage(nodeBlockStorage, i) + if err != nil { + return ctrl.Result{Requeue: true}, nil + } + if result != nil { + return *result, nil + } + } + + controllerutil.RemoveFinalizer(nodeBlockStorage, finalizerNnfNodeBlockStorage) + if err := r.Update(ctx, nodeBlockStorage); err != nil { + if !apierrors.IsConflict(err) { + return ctrl.Result{}, err + } + + return ctrl.Result{Requeue: true}, nil + } + + return ctrl.Result{}, nil + } + + // Add the finalizer if it doesn't exist yet + if !controllerutil.ContainsFinalizer(nodeBlockStorage, finalizerNnfNodeBlockStorage) { + controllerutil.AddFinalizer(nodeBlockStorage, finalizerNnfNodeBlockStorage) + if err := r.Update(ctx, nodeBlockStorage); err != nil { + if !apierrors.IsConflict(err) { + return ctrl.Result{}, err + } + + return ctrl.Result{Requeue: true}, nil + } + + return ctrl.Result{}, nil + } + + // Initialize the status section with empty allocation statuses. 
+	if len(nodeBlockStorage.Status.Allocations) == 0 {
+		nodeBlockStorage.Status.Allocations = make([]nnfv1alpha1.NnfNodeBlockStorageAllocationStatus, len(nodeBlockStorage.Spec.Allocations))
+		for i := range nodeBlockStorage.Status.Allocations {
+			nodeBlockStorage.Status.Allocations[i].Accesses = make(map[string]nnfv1alpha1.NnfNodeBlockStorageAccessStatus)
+		}
+
+		return ctrl.Result{}, nil
+	}
+
+	// Loop through each allocation and create the storage
+	for i := range nodeBlockStorage.Spec.Allocations {
+		// Allocate physical storage
+		result, err := r.allocateStorage(nodeBlockStorage, i)
+		if err != nil {
+			return ctrl.Result{}, dwsv1alpha2.NewResourceError("unable to allocate NVMe namespaces for allocation %v", i).WithError(err).WithMajor()
+		}
+		if result != nil {
+			return *result, nil
+		}
+
+		// Create a block device in /dev that is accessible on the Rabbit node
+		result, err = r.createBlockDevice(ctx, nodeBlockStorage, i)
+		if err != nil {
+			return ctrl.Result{}, dwsv1alpha2.NewResourceError("unable to attach NVMe namespace to node for allocation %v", i).WithError(err).WithMajor()
+		}
+		if result != nil {
+			return *result, nil
+		}
+	}
+
+	nodeBlockStorage.Status.Ready = true
+
+	return ctrl.Result{}, nil
+}
+
+func (r *NnfNodeBlockStorageReconciler) allocateStorage(nodeBlockStorage *nnfv1alpha1.NnfNodeBlockStorage, index int) (*ctrl.Result, error) {
+	log := r.Log.WithValues("NnfNodeBlockStorage", types.NamespacedName{Name: nodeBlockStorage.Name, Namespace: nodeBlockStorage.Namespace})
+
+	ss := nnf.NewDefaultStorageService()
+	nvmeSS := nnfnvme.NewDefaultStorageService()
+
+	allocationStatus := &nodeBlockStorage.Status.Allocations[index]
+
+	storagePoolID := fmt.Sprintf("%s-%d", nodeBlockStorage.Name, index)
+	sp, err := r.createStoragePool(ss, storagePoolID, nodeBlockStorage.Spec.Allocations[index].Capacity)
+	if err != nil {
+		return &ctrl.Result{}, dwsv1alpha2.NewResourceError("could not create storage pool").WithError(err).WithMajor()
+
+	}
+
+	vc := 
&sf.VolumeCollectionVolumeCollection{} + if err := ss.StorageServiceIdStoragePoolIdCapacitySourceIdProvidingVolumesGet(ss.Id(), storagePoolID, "0", vc); err != nil { + return nil, err + } + + if len(allocationStatus.Devices) == 0 { + allocationStatus.Devices = make([]nnfv1alpha1.NnfNodeBlockStorageDeviceStatus, len(vc.Members)) + } + + if len(allocationStatus.Devices) != len(vc.Members) { + return &ctrl.Result{}, dwsv1alpha2.NewResourceError("unexpected number of namespaces").WithFatal() + } + + for i, member := range vc.Members { + components := strings.Split(member.OdataId, "/") + storageId := components[4] + volumeId := components[6] + + storage := &sf.StorageV190Storage{} + if err := nvmeSS.StorageIdGet(storageId, storage); err != nil { + return nil, err + } + + volume := &sf.VolumeV161Volume{} + if err := nvmeSS.StorageIdVolumeIdGet(storageId, volumeId, volume); err != nil { + return nil, err + } + + allocationStatus.Devices[i].NQN = strings.Replace(storage.Identifiers[0].DurableName, "\u0000", "", -1) + allocationStatus.Devices[i].NamespaceId = volume.NVMeNamespaceProperties.NamespaceId + allocationStatus.Devices[i].CapacityAllocated = volume.CapacityBytes + } + + allocationStatus.CapacityAllocated = sp.CapacityBytes + + // If the SF ID is empty then we just created the resource. 
Save the ID in the NnfNodeBlockStorage + if len(allocationStatus.StoragePoolId) == 0 { + log.Info("Created storage pool", "Id", sp.Id) + allocationStatus.StoragePoolId = sp.Id + + return &ctrl.Result{}, nil + } + + return nil, nil +} + +func (r *NnfNodeBlockStorageReconciler) createBlockDevice(ctx context.Context, nodeBlockStorage *nnfv1alpha1.NnfNodeBlockStorage, index int) (*ctrl.Result, error) { + log := r.Log.WithValues("NnfNodeBlockStorage", types.NamespacedName{Name: nodeBlockStorage.Name, Namespace: nodeBlockStorage.Namespace}) + ss := nnf.NewDefaultStorageService() + + allocationStatus := &nodeBlockStorage.Status.Allocations[index] + + // Create a Storage Group if none is currently present. Recall that a Storage Group + // is a mapping from the Storage Pool to a Server Endpoint. Establishing a Storage + // Group makes block storage available on the server, which itself is a prerequisite to + // any file system built on top of the block storage. + + // Retrieve the collection of endpoints for us to map + serverEndpointCollection := &sf.EndpointCollectionEndpointCollection{} + if err := ss.StorageServiceIdEndpointsGet(ss.Id(), serverEndpointCollection); err != nil { + return nil, dwsv1alpha2.NewResourceError("could not get service endpoint").WithError(err).WithFatal() + } + + // Get the Storage resource to map between compute node name and + // endpoint index. 
+ namespacedName := types.NamespacedName{ + Name: nodeBlockStorage.Namespace, + Namespace: "default", + } + + storage := &dwsv1alpha2.Storage{} + err := r.Get(ctx, namespacedName, storage) + if err != nil { + return nil, dwsv1alpha2.NewResourceError("could not read storage resource").WithError(err) + } + + // Build a list of all nodes with access to the storage + clients := []string{} + for _, server := range storage.Status.Access.Servers { + clients = append(clients, server.Name) + } + + for _, compute := range storage.Status.Access.Computes { + clients = append(clients, compute.Name) + } + + // Make a list of all the endpoints and set whether they need a storage group based + // on the list of clients specified in the ClientEndpoints array + accessList := make([]string, len(serverEndpointCollection.Members)) + for _, nodeName := range nodeBlockStorage.Spec.Allocations[index].Access { + for i, clientName := range clients { + if nodeName == clientName { + accessList[i] = nodeName + } + } + } + + // Loop through the list of endpoints and delete the StorageGroup for endpoints where + // access==false, and create the StorageGroup for endpoints where access==true + for clientIndex, nodeName := range accessList { + endpointRef := serverEndpointCollection.Members[clientIndex] + endpointID := endpointRef.OdataId[strings.LastIndex(endpointRef.OdataId, "/")+1:] + storageGroupId := fmt.Sprintf("%s-%d-%s", nodeBlockStorage.Name, index, endpointID) + + // If the endpoint doesn't need a storage group, remove one if it exists + if nodeName == "" { + if _, err := r.getStorageGroup(ss, storageGroupId); err != nil { + continue + } + + if err := r.deleteStorageGroup(ss, storageGroupId); err != nil { + return nil, dwsv1alpha2.NewResourceError("could not delete storage group").WithError(err).WithMajor() + } + + delete(allocationStatus.Accesses, nodeName) + + log.Info("Deleted storage group", "storageGroupId", storageGroupId) + } else { + // The kind environment doesn't support 
endpoints beyond the Rabbit + if os.Getenv("ENVIRONMENT") == "kind" && endpointID != os.Getenv("RABBIT_NODE") { + continue + } + + endPoint, err := r.getEndpoint(ss, endpointID) + if err != nil { + return nil, dwsv1alpha2.NewResourceError("could not get endpoint").WithError(err).WithFatal() + } + + // Skip the endpoints that are not ready + if nnfv1alpha1.StaticResourceStatus(endPoint.Status) != nnfv1alpha1.ResourceReady { + continue + } + + sg, err := r.createStorageGroup(ss, storageGroupId, allocationStatus.StoragePoolId, endpointID) + if err != nil { + return &ctrl.Result{}, dwsv1alpha2.NewResourceError("could not create storage group").WithError(err).WithMajor() + } + + if allocationStatus.Accesses == nil { + allocationStatus.Accesses = make(map[string]nnfv1alpha1.NnfNodeBlockStorageAccessStatus) + } + + // If the access status doesn't exist then we just created the resource. Save the ID in the NnfNodeBlockStorage + if _, ok := allocationStatus.Accesses[nodeName]; !ok { + log.Info("Created storage group", "Id", storageGroupId) + allocationStatus.Accesses[nodeName] = nnfv1alpha1.NnfNodeBlockStorageAccessStatus{StorageGroupId: sg.Id} + + return &ctrl.Result{}, nil + } + + // The device paths are discovered below. 
This is only relevant for the Rabbit node access + if nodeName != clients[0] { + return nil, nil + } + + // + _, found := os.LookupEnv("NNF_TEST_ENVIRONMENT") + if found || os.Getenv("ENVIRONMENT") == "kind" { + return nil, nil + } + + // Initialize the path array if it doesn't exist yet + if len(allocationStatus.Accesses[nodeName].DevicePaths) != len(allocationStatus.Devices) { + if access, ok := allocationStatus.Accesses[nodeName]; ok { + access.DevicePaths = make([]string, len(allocationStatus.Devices)) + allocationStatus.Accesses[nodeName] = access + } + } + + foundDevices, err := nvme.NvmeListDevices() + if err != nil { + return nil, err + } + + for i, allocatedDevice := range allocationStatus.Devices { + findMatchingNvmeDevice := func() string { + for _, foundDevice := range foundDevices { + if allocatedDevice.NQN == foundDevice.NQN && allocatedDevice.NamespaceId == strconv.FormatUint(uint64(foundDevice.NSID), 10) { + return foundDevice.DevicePath + } + } + + return "" + } + + path := findMatchingNvmeDevice() + if path == "" { + err := nvme.NvmeRescanDevices() + if err != nil { + return nil, dwsv1alpha2.NewResourceError("could not rescan devices after failing to find device path for %v", allocatedDevice).WithError(err).WithMajor() + } + + return nil, dwsv1alpha2.NewResourceError("could not find device path for %v", allocatedDevice).WithMajor() + } + + allocationStatus.Accesses[nodeName].DevicePaths[i] = path + } + } + } + + return nil, nil + +} + +func (r *NnfNodeBlockStorageReconciler) deleteStorage(nodeBlockStorage *nnfv1alpha1.NnfNodeBlockStorage, index int) (*ctrl.Result, error) { + log := r.Log.WithValues("NnfNodeBlockStorage", types.NamespacedName{Name: nodeBlockStorage.Name, Namespace: nodeBlockStorage.Namespace}) + + ss := nnf.NewDefaultStorageService() + + allocationStatus := &nodeBlockStorage.Status.Allocations[index] + if allocationStatus.StoragePoolId == "" { + return nil, nil + } + + log.Info("Deleting storage pool", "Id", 
allocationStatus.StoragePoolId) + + err := r.deleteStoragePool(ss, allocationStatus.StoragePoolId) + if err != nil { + ecErr, ok := err.(*ec.ControllerError) + + // If the error is from a 404 error, then there's nothing to clean up and we + // assume everything has been deleted + if !ok || ecErr.StatusCode() != http.StatusNotFound { + nodeBlockStorage.Status.Error = dwsv1alpha2.NewResourceError("could not delete storage pool").WithError(err).WithFatal() + log.Info(nodeBlockStorage.Status.Error.Error()) + + return &ctrl.Result{Requeue: true}, nil + } + } + + return nil, nil +} + +func (r *NnfNodeBlockStorageReconciler) createStoragePool(ss nnf.StorageServiceApi, id string, capacity int64) (*sf.StoragePoolV150StoragePool, error) { + sp := &sf.StoragePoolV150StoragePool{ + Id: id, + CapacityBytes: capacity, + Oem: openapi.MarshalOem(nnf.AllocationPolicyOem{ + Policy: nnf.SpareAllocationPolicyType, + Compliance: nnf.RelaxedAllocationComplianceType, + }), + } + + if err := ss.StorageServiceIdStoragePoolIdPut(ss.Id(), id, sp); err != nil { + resourceErr := dwsv1alpha2.NewResourceError("could not allocate storage pool").WithError(err) + ecErr, ok := err.(*ec.ControllerError) + if ok { + switch ecErr.Cause() { + case "Insufficient capacity available": + return nil, resourceErr.WithUserMessage("insufficient capacity available").WithWLM().WithFatal() + default: + return nil, resourceErr + } + } + + return nil, resourceErr + } + + return sp, nil +} + +func (r *NnfNodeBlockStorageReconciler) getStoragePool(ss nnf.StorageServiceApi, id string) (*sf.StoragePoolV150StoragePool, error) { + sp := &sf.StoragePoolV150StoragePool{} + + if err := ss.StorageServiceIdStoragePoolIdGet(ss.Id(), id, sp); err != nil { + return nil, err + } + + return sp, nil +} + +func (r *NnfNodeBlockStorageReconciler) deleteStoragePool(ss nnf.StorageServiceApi, id string) error { + if err := ss.StorageServiceIdStoragePoolIdDelete(ss.Id(), id); err != nil { + return err + } + + return nil +} + +func (r 
*NnfNodeBlockStorageReconciler) getEndpoint(ss nnf.StorageServiceApi, id string) (*sf.EndpointV150Endpoint, error) { + ep := &sf.EndpointV150Endpoint{} + + if err := ss.StorageServiceIdEndpointIdGet(ss.Id(), id, ep); err != nil { + return nil, err + } + + return ep, nil +} + +func (r *NnfNodeBlockStorageReconciler) createStorageGroup(ss nnf.StorageServiceApi, id string, spID string, epID string) (*sf.StorageGroupV150StorageGroup, error) { + sp, err := r.getStoragePool(ss, spID) + if err != nil { + return nil, err + } + + ep, err := r.getEndpoint(ss, epID) + if err != nil { + return nil, err + } + + sg := &sf.StorageGroupV150StorageGroup{ + Id: id, + Links: sf.StorageGroupV150Links{ + StoragePool: sf.OdataV4IdRef{OdataId: sp.OdataId}, + ServerEndpoint: sf.OdataV4IdRef{OdataId: ep.OdataId}, + }, + } + + if err := ss.StorageServiceIdStorageGroupIdPut(ss.Id(), id, sg); err != nil { + return nil, err + } + + return sg, nil +} + +func (r *NnfNodeBlockStorageReconciler) getStorageGroup(ss nnf.StorageServiceApi, id string) (*sf.StorageGroupV150StorageGroup, error) { + sg := &sf.StorageGroupV150StorageGroup{} + + if err := ss.StorageServiceIdStorageGroupIdGet(ss.Id(), id, sg); err != nil { + return nil, err + } + + return sg, nil +} + +func (r *NnfNodeBlockStorageReconciler) deleteStorageGroup(ss nnf.StorageServiceApi, id string) error { + return ss.StorageServiceIdStorageGroupIdDelete(ss.Id(), id) +} + +// SetupWithManager sets up the controller with the Manager. +func (r *NnfNodeBlockStorageReconciler) SetupWithManager(mgr ctrl.Manager) error { + // nnf-ec is not thread safe, so we are limited to a single reconcile thread. + return ctrl.NewControllerManagedBy(mgr). + WithOptions(controller.Options{MaxConcurrentReconciles: 1}). + For(&nnfv1alpha1.NnfNodeBlockStorage{}). 
+ Complete(r) +} diff --git a/internal/controller/nnf_node_controller.go b/internal/controller/nnf_node_controller.go index 1ad90f2a7..b5229f0b8 100644 --- a/internal/controller/nnf_node_controller.go +++ b/internal/controller/nnf_node_controller.go @@ -46,6 +46,7 @@ import ( nnf "github.com/NearNodeFlash/nnf-ec/pkg/manager-nnf" nvme "github.com/NearNodeFlash/nnf-ec/pkg/manager-nvme" sf "github.com/NearNodeFlash/nnf-ec/pkg/rfsf/pkg/models" + "github.com/NearNodeFlash/nnf-sos/pkg/command" dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/updater" @@ -307,6 +308,23 @@ func (r *NnfNodeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re } } + _, found := os.LookupEnv("NNF_TEST_ENVIRONMENT") + if found || os.Getenv("ENVIRONMENT") == "kind" { + node.Status.LNetNid = "1.2.3.4@tcp" + return ctrl.Result{}, nil + } + + output, err := command.Run("lctl list_nids") + if err != nil { + return ctrl.Result{}, fmt.Errorf("Could not find local LNid: %w", err) + } + + for _, nid := range strings.Split(string(output), "\n") { + if strings.Contains(nid, "@") { + node.Status.LNetNid = nid + } + } + return ctrl.Result{}, nil } diff --git a/internal/controller/nnf_node_storage_controller.go b/internal/controller/nnf_node_storage_controller.go index 926ae17f4..5a68b4c64 100644 --- a/internal/controller/nnf_node_storage_controller.go +++ b/internal/controller/nnf_node_storage_controller.go @@ -21,32 +21,15 @@ package controller import ( "context" - "crypto/md5" - "fmt" - "net/http" - "os" - "strconv" - "strings" - "time" "github.com/go-logr/logr" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/mount-utils" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" 
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - ec "github.com/NearNodeFlash/nnf-ec/pkg/ec" - nnf "github.com/NearNodeFlash/nnf-ec/pkg/manager-nnf" - nnfserver "github.com/NearNodeFlash/nnf-ec/pkg/manager-server" - - openapi "github.com/NearNodeFlash/nnf-ec/pkg/rfsf/pkg/common" - sf "github.com/NearNodeFlash/nnf-ec/pkg/rfsf/pkg/models" - dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" "github.com/DataWorkflowServices/dws/utils/updater" nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" @@ -70,9 +53,11 @@ type NnfNodeStorageReconciler struct { Scheme *kruntime.Scheme types.NamespacedName + ChildObjects []dwsv1alpha2.ObjectList } //+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfnodestorages,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfnodestorages/status,verbs=get;update;patch //+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfnodestorages/finalizers,verbs=update // Reconcile is part of the main kubernetes reconciliation loop which aims to @@ -84,25 +69,14 @@ func (r *NnfNodeStorageReconciler) Reconcile(ctx context.Context, req ctrl.Reque log := r.Log.WithValues("NnfNodeStorage", req.NamespacedName) metrics.NnfNodeStorageReconcilesTotal.Inc() - nodeStorage := &nnfv1alpha1.NnfNodeStorage{} - if err := r.Get(ctx, req.NamespacedName, nodeStorage); err != nil { + nnfNodeStorage := &nnfv1alpha1.NnfNodeStorage{} + if err := r.Get(ctx, req.NamespacedName, nnfNodeStorage); err != nil { // ignore not-found errors, since they can't be fixed by an immediate // requeue (we'll need to wait for a new notification), and we can get them // on deleted requests. return ctrl.Result{}, client.IgnoreNotFound(err) } - // Ensure the NNF Storage Service is running prior to taking any action. 
- ss := nnf.NewDefaultStorageService() - storageService := &sf.StorageServiceV150StorageService{} - if err := ss.StorageServiceIdGet(ss.Id(), storageService); err != nil { - return ctrl.Result{}, err - } - - if storageService.Status.State != sf.ENABLED_RST { - return ctrl.Result{RequeueAfter: 1 * time.Second}, nil - } - // Use the Node Storage Status Updater to track updates to the storage status. // This ensures that only one call to r.Status().Update() is done even though we // update the status at several points in the process. We hijack the defer logic @@ -111,9 +85,9 @@ func (r *NnfNodeStorageReconciler) Reconcile(ctx context.Context, req ctrl.Reque // so when we would normally call "return ctrl.Result{}, nil", at that time // "err" is nil - and if permitted we will update err with the result of // the r.Update() - statusUpdater := updater.NewStatusUpdater[*nnfv1alpha1.NnfNodeStorageStatus](nodeStorage) - defer func() { err = statusUpdater.CloseWithUpdate(ctx, r, err) }() - defer func() { nodeStorage.Status.SetResourceErrorAndLog(err, log) }() + statusUpdater := updater.NewStatusUpdater[*nnfv1alpha1.NnfNodeStorageStatus](nnfNodeStorage) + defer func() { err = statusUpdater.CloseWithStatusUpdate(ctx, r.Client.Status(), err) }() + defer func() { nnfNodeStorage.Status.SetResourceErrorAndLog(err, log) }() // Check if the object is being deleted. Deletion is carefully coordinated around // the NNF resources being managed by this NNF Node Storage resource. For a @@ -122,24 +96,25 @@ func (r *NnfNodeStorageReconciler) Reconcile(ctx context.Context, req ctrl.Reque // File System, and File Shares). The Finalizer on this NNF Node Storage resource // is present until the underlying NNF resources are deleted through the // storage service. 
- if !nodeStorage.GetDeletionTimestamp().IsZero() { - if !controllerutil.ContainsFinalizer(nodeStorage, finalizerNnfNodeStorage) { + if !nnfNodeStorage.GetDeletionTimestamp().IsZero() { + if !controllerutil.ContainsFinalizer(nnfNodeStorage, finalizerNnfNodeStorage) { return ctrl.Result{}, nil } - for i := range nodeStorage.Status.Allocations { + for i := range nnfNodeStorage.Status.Allocations { // Release physical storage - result, err := r.deleteStorage(nodeStorage, i) + result, err := r.deleteAllocation(ctx, nnfNodeStorage, i) if err != nil { - return ctrl.Result{Requeue: true}, nil + + return ctrl.Result{}, err } if result != nil { return *result, nil } } - controllerutil.RemoveFinalizer(nodeStorage, finalizerNnfNodeStorage) - if err := r.Update(ctx, nodeStorage); err != nil { + controllerutil.RemoveFinalizer(nnfNodeStorage, finalizerNnfNodeStorage) + if err := r.Update(ctx, nnfNodeStorage); err != nil { if !apierrors.IsConflict(err) { return ctrl.Result{}, err } @@ -153,10 +128,11 @@ func (r *NnfNodeStorageReconciler) Reconcile(ctx context.Context, req ctrl.Reque // First time setup requires programming of the storage status such that the resource // is labeled as "Starting". After this is done, // the resource obtains a finalizer to manage the resource lifetime. - if !controllerutil.ContainsFinalizer(nodeStorage, finalizerNnfNodeStorage) { - controllerutil.AddFinalizer(nodeStorage, finalizerNnfNodeStorage) - if err := r.Update(ctx, nodeStorage); err != nil { + if !controllerutil.ContainsFinalizer(nnfNodeStorage, finalizerNnfNodeStorage) { + controllerutil.AddFinalizer(nnfNodeStorage, finalizerNnfNodeStorage) + if err := r.Update(ctx, nnfNodeStorage); err != nil { if !apierrors.IsConflict(err) { + return ctrl.Result{}, err } @@ -167,44 +143,21 @@ func (r *NnfNodeStorageReconciler) Reconcile(ctx context.Context, req ctrl.Reque } // Initialize the status section with empty allocation statuses. 
- if len(nodeStorage.Status.Allocations) == 0 { - nodeStorage.Status.Allocations = make([]nnfv1alpha1.NnfNodeStorageAllocationStatus, nodeStorage.Spec.Count) - - for i := range nodeStorage.Status.Allocations { - allocation := &nodeStorage.Status.Allocations[i] - - allocation.StoragePool.Status = nnfv1alpha1.ResourceStarting - allocation.StorageGroup.Status = nnfv1alpha1.ResourceStarting - allocation.FileSystem.Status = nnfv1alpha1.ResourceStarting - allocation.FileShare.Status = nnfv1alpha1.ResourceStarting + if len(nnfNodeStorage.Status.Allocations) == 0 { + nnfNodeStorage.Status.Allocations = make([]nnfv1alpha1.NnfNodeStorageAllocationStatus, nnfNodeStorage.Spec.Count) + for i := range nnfNodeStorage.Status.Allocations { + nnfNodeStorage.Status.Allocations[i].Ready = false } + nnfNodeStorage.Status.Ready = false - return ctrl.Result{}, nil + return ctrl.Result{Requeue: true}, nil } // Loop through each allocation and create the storage - for i := 0; i < nodeStorage.Spec.Count; i++ { - // Allocate physical storage - result, err := r.allocateStorage(nodeStorage, i) - if err != nil { - return ctrl.Result{}, dwsv1alpha2.NewResourceError("unable to allocate NVMe namespaces for allocation %v", i).WithError(err).WithMajor() - } - if result != nil { - return *result, nil - } - - // Create a block device in /dev that is accessible on the Rabbit node - result, err = r.createBlockDevice(ctx, nodeStorage, i) + for i := 0; i < nnfNodeStorage.Spec.Count; i++ { + result, err := r.createAllocation(ctx, nnfNodeStorage, i) if err != nil { - return ctrl.Result{}, dwsv1alpha2.NewResourceError("unable to attache NVMe namespace to Rabbit node for allocation %v", i).WithError(err).WithMajor() - } - if result != nil { - return *result, nil - } - // Format the block device from the Rabbit with a file system (if needed) - result, err = r.formatFileSystem(ctx, nodeStorage, i) - if err != nil { return ctrl.Result{}, dwsv1alpha2.NewResourceError("unable to format file system for allocation 
%v", i).WithError(err).WithMajor() } if result != nil { @@ -212,664 +165,117 @@ func (r *NnfNodeStorageReconciler) Reconcile(ctx context.Context, req ctrl.Reque } } - if nodeStorage.Spec.SetOwnerGroup && nodeStorage.Status.OwnerGroupStatus != nnfv1alpha1.ResourceReady { - if nodeStorage.Status.OwnerGroupStatus == "" { - nodeStorage.Status.OwnerGroupStatus = nnfv1alpha1.ResourceStarting - - return ctrl.Result{}, nil - } + for _, allocation := range nnfNodeStorage.Status.Allocations { + if allocation.Ready == false { + nnfNodeStorage.Status.Ready = false - if err := r.setLustreOwnerGroup(nodeStorage); err != nil { - return ctrl.Result{}, dwsv1alpha2.NewResourceError("unable to set owner and group for file system").WithError(err).WithMajor() + return ctrl.Result{Requeue: true}, nil } - - nodeStorage.Status.OwnerGroupStatus = nnfv1alpha1.ResourceReady } + nnfNodeStorage.Status.Ready = true + return ctrl.Result{}, nil } -func (r *NnfNodeStorageReconciler) allocateStorage(nodeStorage *nnfv1alpha1.NnfNodeStorage, index int) (*ctrl.Result, error) { - log := r.Log.WithValues("NnfNodeStorage", types.NamespacedName{Name: nodeStorage.Name, Namespace: nodeStorage.Namespace}) - - ss := nnf.NewDefaultStorageService() +func (r *NnfNodeStorageReconciler) deleteAllocation(ctx context.Context, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, index int) (*ctrl.Result, error) { + log := r.Log.WithValues("NnfNodeStorage", client.ObjectKeyFromObject(nnfNodeStorage), "index", index) - allocationStatus := &nodeStorage.Status.Allocations[index] - - storagePoolID := fmt.Sprintf("%s-%d", nodeStorage.Name, index) - sp, err := r.createStoragePool(ss, storagePoolID, nodeStorage.Spec.Capacity) + blockDevice, fileSystem, err := getBlockDeviceAndFileSystem(ctx, r.Client, nnfNodeStorage, index, log) if err != nil { - allocationStatus.StoragePool.Status = nnfv1alpha1.ResourceFailed - return &ctrl.Result{}, dwsv1alpha2.NewResourceError("could not create storage pool").WithError(err).WithMajor() - - } - - 
allocationStatus.StoragePool.Status = nnfv1alpha1.ResourceStatus(sp.Status) - allocationStatus.StoragePool.Health = nnfv1alpha1.ResourceHealth(sp.Status) - allocationStatus.CapacityAllocated = sp.CapacityBytes - - // If the SF ID is empty then we just created the resource. Save the ID in the NnfNodeStorage - if len(allocationStatus.StoragePool.ID) == 0 { - log.Info("Created storage pool", "Id", sp.Id) - allocationStatus.StoragePool.ID = sp.Id - - return &ctrl.Result{}, nil + return nil, err } - return nil, nil -} - -func (r *NnfNodeStorageReconciler) createBlockDevice(ctx context.Context, nodeStorage *nnfv1alpha1.NnfNodeStorage, index int) (*ctrl.Result, error) { - log := r.Log.WithValues("NnfNodeStorage", types.NamespacedName{Name: nodeStorage.Name, Namespace: nodeStorage.Namespace}) - ss := nnf.NewDefaultStorageService() - - allocationStatus := &nodeStorage.Status.Allocations[index] - - // Create a Storage Group if none is currently present. Recall that a Storage Group - // is a mapping from the Storage Pool to a Server Endpoint. Establishing a Storage - // Group makes block storage available on the server, which itself is a prerequisite to - // any file system built on top of the block storage. - - // Retrieve the collection of endpoints for us to map - serverEndpointCollection := &sf.EndpointCollectionEndpointCollection{} - if err := ss.StorageServiceIdEndpointsGet(ss.Id(), serverEndpointCollection); err != nil { - return nil, dwsv1alpha2.NewResourceError("could not get service endpoint").WithError(err).WithFatal() + ran, err := fileSystem.Deactivate(ctx) + if err != nil { + return nil, dwsv1alpha2.NewResourceError("could not deactivate file system").WithError(err).WithMajor() } - - // Get the Storage resource to map between compute node name and - // endpoint index. 
- namespacedName := types.NamespacedName{ - Name: nodeStorage.Namespace, - Namespace: "default", + if ran { + log.Info("Deactivated file system", "allocation", index) } - storage := &dwsv1alpha2.Storage{} - err := r.Get(ctx, namespacedName, storage) + ran, err = fileSystem.Destroy(ctx) if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not read storage resource").WithError(err) + return nil, dwsv1alpha2.NewResourceError("could not destroy file system").WithError(err).WithMajor() } - - // Build a list of all nodes with access to the storage - clients := []string{} - for _, server := range storage.Status.Access.Servers { - clients = append(clients, server.Name) + if ran { + log.Info("Destroyed file system", "allocation", index) } - for _, compute := range storage.Status.Access.Computes { - clients = append(clients, compute.Name) + ran, err = blockDevice.Deactivate(ctx) + if err != nil { + return nil, dwsv1alpha2.NewResourceError("could not deactivate block devices").WithError(err).WithMajor() } - - // Make a list of all the endpoints and set whether they need a storage group based - // on the list of clients specified in the ClientEndpoints array - accessList := make([]bool, len(serverEndpointCollection.Members)) - for _, nodeName := range nodeStorage.Spec.ClientEndpoints[index].NodeNames { - for i, clientName := range clients { - if nodeName == clientName { - accessList[i] = true - } - } + if ran { + log.Info("Deactivated block device", "allocation", index) } - // Loop through the list of endpoints and delete the StorageGroup for endpoints where - // access==false, and create the StorageGroup for endpoints where access==true - for clientIndex, access := range accessList { - endpointRef := serverEndpointCollection.Members[clientIndex] - endpointID := endpointRef.OdataId[strings.LastIndex(endpointRef.OdataId, "/")+1:] - storageGroupID := fmt.Sprintf("%s-%d-%s", nodeStorage.Name, index, endpointID) - - // If the endpoint doesn't need a storage group, 
remove one if it exists - if access == false { - if _, err := r.getStorageGroup(ss, storageGroupID); err != nil { - continue - } - - if err := r.deleteStorageGroup(ss, storageGroupID); err != nil { - return nil, dwsv1alpha2.NewResourceError("could not delete storage group").WithError(err).WithMajor() - } - - log.Info("Deleted storage group", "storageGroupID", storageGroupID) - } else { - // The kind environment doesn't support endpoints beyond the Rabbit - if os.Getenv("ENVIRONMENT") == "kind" && endpointID != os.Getenv("RABBIT_NODE") { - continue - } - - endPoint, err := r.getEndpoint(ss, endpointID) - if err != nil { - return nil, dwsv1alpha2.NewResourceError("could not get endpoint").WithError(err).WithFatal() - } - - // Skip the endpoints that are not ready - if nnfv1alpha1.StaticResourceStatus(endPoint.Status) != nnfv1alpha1.ResourceReady { - continue - } - - sg, err := r.createStorageGroup(ss, storageGroupID, allocationStatus.StoragePool.ID, endpointID) - if err != nil { - allocationStatus.StorageGroup.Status = nnfv1alpha1.ResourceFailed - return &ctrl.Result{}, dwsv1alpha2.NewResourceError("could not create storage group").WithError(err).WithMajor() - } - - allocationStatus.StorageGroup.Status = nnfv1alpha1.ResourceStatus(sg.Status) - allocationStatus.StorageGroup.Health = nnfv1alpha1.ResourceHealth(sg.Status) - - // If the SF ID is empty then we just created the resource. 
Save the ID in the NnfNodeStorage - if len(allocationStatus.StorageGroup.ID) == 0 { - log.Info("Created storage group", "Id", storageGroupID) - allocationStatus.StorageGroup.ID = sg.Id - - return &ctrl.Result{}, nil - } - } + ran, err = blockDevice.Destroy(ctx) + if err != nil { + return nil, dwsv1alpha2.NewResourceError("could not destroy block devices").WithError(err).WithMajor() + } + if ran { + log.Info("Destroyed block device", "allocation", index) } return nil, nil } -func (r *NnfNodeStorageReconciler) formatFileSystem(ctx context.Context, nodeStorage *nnfv1alpha1.NnfNodeStorage, index int) (*ctrl.Result, error) { - log := r.Log.WithValues("NnfNodeStorage", types.NamespacedName{Name: nodeStorage.Name, Namespace: nodeStorage.Namespace}) - ss := nnf.NewDefaultStorageService() - - allocationStatus := &nodeStorage.Status.Allocations[index] - - // Check whether everything in the spec is filled in to make the FS. Lustre - // MDTs and OSTs won't have their MgsNode field filled in until after the MGT - // is created. 
- if !r.isSpecComplete(nodeStorage) { - return &ctrl.Result{}, nil - } - - // Find the Rabbit node endpoint to collect LNet information - endpoint, err := r.getEndpoint(ss, os.Getenv("RABBIT_NODE")) - if err != nil { - nodeStorage.Status.Error = dwsv1alpha2.NewResourceError("could not get endpoint").WithError(err).WithFatal() - log.Info(nodeStorage.Status.Error.Error()) - - return &ctrl.Result{}, dwsv1alpha2.NewResourceError("could not get endpoint").WithError(err).WithMajor() - } +func (r *NnfNodeStorageReconciler) createAllocation(ctx context.Context, nnfNodeStorage *nnfv1alpha1.NnfNodeStorage, index int) (*ctrl.Result, error) { + log := r.Log.WithValues("NnfNodeStorage", client.ObjectKeyFromObject(nnfNodeStorage), "index", index) - nnfStorageProfile, err := getPinnedStorageProfileFromLabel(ctx, r.Client, nodeStorage) + blockDevice, fileSystem, err := getBlockDeviceAndFileSystem(ctx, r.Client, nnfNodeStorage, index, log) if err != nil { - allocationStatus.FileSystem.Status = nnfv1alpha1.ResourceFailed - nodeStorage.Status.Error = dwsv1alpha2.NewResourceError("could not find pinned storage profile").WithError(err).WithFatal() - log.Info(nodeStorage.Status.Error.Error()) - - return &ctrl.Result{}, nil - } - - // Create the FileSystem - oem := nnfserver.FileSystemOem{ - Type: nodeStorage.Spec.FileSystemType, - } - - if oem.Type == "lustre" { - setLusCmdLines := func(c *nnfv1alpha1.NnfStorageProfileLustreCmdLines) { - oem.MkfsMount.Mkfs = c.Mkfs - oem.ZfsCmd.ZpoolCreate = c.ZpoolCreate - } - - setLusOpts := func(c *nnfv1alpha1.NnfStorageProfileLustreMiscOptions) { - oem.MkfsMount.Mount = c.MountTarget - } - - oem.Name = nodeStorage.Spec.LustreStorage.FileSystemName - oem.Lustre.Index = nodeStorage.Spec.LustreStorage.StartIndex + index - oem.Lustre.MgsNode = nodeStorage.Spec.LustreStorage.MgsNode - oem.Lustre.TargetType = nodeStorage.Spec.LustreStorage.TargetType - oem.Lustre.BackFs = nodeStorage.Spec.LustreStorage.BackFs - - switch 
nodeStorage.Spec.LustreStorage.TargetType { - case "MGT": - setLusCmdLines(&nnfStorageProfile.Data.LustreStorage.MgtCmdLines) - setLusOpts(&nnfStorageProfile.Data.LustreStorage.MgtOptions) - case "MDT": - setLusCmdLines(&nnfStorageProfile.Data.LustreStorage.MdtCmdLines) - setLusOpts(&nnfStorageProfile.Data.LustreStorage.MdtOptions) - case "MGTMDT": - setLusCmdLines(&nnfStorageProfile.Data.LustreStorage.MgtMdtCmdLines) - setLusOpts(&nnfStorageProfile.Data.LustreStorage.MgtMdtOptions) - case "OST": - setLusCmdLines(&nnfStorageProfile.Data.LustreStorage.OstCmdLines) - setLusOpts(&nnfStorageProfile.Data.LustreStorage.OstOptions) - } - } - - setCmdLines := func(c *nnfv1alpha1.NnfStorageProfileCmdLines) { - oem.MkfsMount.Mkfs = c.Mkfs - oem.LvmCmd.PvCreate = c.PvCreate - oem.LvmCmd.VgCreate = c.VgCreate - oem.LvmCmd.VgChange = nnfserver.FileSystemOemVgChange{ - Activate: c.VgChange.Activate, - Deactivate: c.VgChange.Deactivate, - LockStart: c.VgChange.LockStart, - } - oem.LvmCmd.VgRemove = c.VgRemove - oem.LvmCmd.LvCreate = c.LvCreate - oem.LvmCmd.LvRemove = c.LvRemove - } - - setOpts := func(c *nnfv1alpha1.NnfStorageProfileMiscOptions) { - oem.MkfsMount.Mount = c.MountRabbit - } - - if oem.Type == "gfs2" { - // GFS2 requires a maximum of 16 alphanumeric, hyphen, or underscore characters. Allow up to 99 storage indicies and - // generate a simple MD5SUM hash value from the node storage name for the tail end. Although not guaranteed, this - // should reduce the likelihood of conflicts to a diminishingly small value. - checksum := md5.Sum([]byte(nodeStorage.Name)) - oem.Name = fmt.Sprintf("fs-%02d-%x", index, string(checksum[0:5])) - - // The cluster name is the "name" of the Rabbit, which is mapped to the node storage namespace (since NNF Node Storage - // is rabbit namespace scoped). 
- oem.Gfs2.ClusterName = nodeStorage.Namespace - setCmdLines(&nnfStorageProfile.Data.GFS2Storage.CmdLines) - setOpts(&nnfStorageProfile.Data.GFS2Storage.Options) - } - - if oem.Type == "xfs" { - setCmdLines(&nnfStorageProfile.Data.XFSStorage.CmdLines) - setOpts(&nnfStorageProfile.Data.XFSStorage.Options) - } - - if oem.Type == "raw" { - setCmdLines(&nnfStorageProfile.Data.RawStorage.CmdLines) + return nil, err } - fileSystemID := fmt.Sprintf("%s-%d", nodeStorage.Name, index) - fs, err := r.createFileSystem(ss, fileSystemID, allocationStatus.StoragePool.ID, oem) + allocationStatus := &nnfNodeStorage.Status.Allocations[index] + ran, err := blockDevice.Create(ctx, allocationStatus.Ready) if err != nil { - allocationStatus.FileSystem.Status = nnfv1alpha1.ResourceFailed - - return &ctrl.Result{}, dwsv1alpha2.NewResourceError("could not create file system").WithError(err).WithMajor() - } - - allocationStatus.FileSystem.Status = nnfv1alpha1.ResourceReady - allocationStatus.FileSystem.Health = nnfv1alpha1.ResourceOkay - - // If the SF ID is empty then we just created the resource. 
Save the ID in the NnfNodeStorage - if len(allocationStatus.FileSystem.ID) == 0 { - log.Info("Created filesystem", "Id", fs.Id) - allocationStatus.FileSystem.ID = fs.Id - - return &ctrl.Result{}, nil + return nil, dwsv1alpha2.NewResourceError("could not create block devices").WithError(err).WithMajor() } - - // Create the FileShare - fileShareID := fmt.Sprintf("%s-%d", nodeStorage.Name, index) - - mountPath := "" - sh, err := r.getFileShare(ss, fileShareID, allocationStatus.FileSystem.ID) - if err == nil { - mountPath = sh.FileSharePath + if ran { + log.Info("Created block device", "allocation", index) } - shareOptions := make(map[string]interface{}) - var volumeGroupName, logicalVolumeName string - if nodeStorage.Spec.FileSystemType == "lustre" { - targetIndex := nodeStorage.Spec.LustreStorage.StartIndex + index - mountPath = "/mnt/lustre/" + nodeStorage.Spec.LustreStorage.FileSystemName + "/" + nodeStorage.Spec.LustreStorage.TargetType + strconv.Itoa(targetIndex) - } else { - volumeGroupName, logicalVolumeName, err = r.lvmNames(ctx, nodeStorage, index) + // We don't need to activate the block device here. 
It will be activated either when there is a mkfs, or when it's used + // by a ClientMount + if fileSystem != nil { + ran, err = fileSystem.Create(ctx, allocationStatus.Ready) if err != nil { - allocationStatus.FileShare.Status = nnfv1alpha1.ResourceFailed - return &ctrl.Result{}, dwsv1alpha2.NewResourceError("could not get VG/LV names").WithError(err).WithFatal() + return nil, dwsv1alpha2.NewResourceError("could not create file system").WithError(err).WithMajor() } - - shareOptions["volumeGroupName"] = volumeGroupName - shareOptions["logicalVolumeName"] = logicalVolumeName - shareOptions["userID"] = int(nodeStorage.Spec.UserID) - shareOptions["groupID"] = int(nodeStorage.Spec.GroupID) - } - - sh, err = r.createFileShare(ss, fileShareID, allocationStatus.FileSystem.ID, os.Getenv("RABBIT_NODE"), mountPath, shareOptions) - if err != nil { - allocationStatus.FileShare.Status = nnfv1alpha1.ResourceFailed - return &ctrl.Result{}, dwsv1alpha2.NewResourceError("could not create file share").WithError(err).WithMajor() - } - - nid := "" - if nidRaw, present := endpoint.Oem["LNetNids"]; present && nodeStorage.Spec.FileSystemType == "lustre" { - nidList := nidRaw.([]string) - if len(nidList) > 0 { - // TODO: If there are multiple LNet Nids, have a way to pick - // which network we want to use. - nid = nidList[0] + if ran { + log.Info("Created file system", "allocation", index) } - } - - allocationStatus.FileShare.Status = nnfv1alpha1.ResourceStatus(sh.Status) - allocationStatus.FileShare.Health = nnfv1alpha1.ResourceHealth(sh.Status) - nodeStorage.Status.LustreStorage.Nid = nid - - // If the SF ID is empty then we just created the resource. 
Save the ID in the NnfNodeStorage - if len(allocationStatus.FileShare.ID) == 0 { - log.Info("Created file share", "Id", sh.Id) - allocationStatus.FileShare.ID = sh.Id - allocationStatus.VolumeGroup = volumeGroupName - allocationStatus.LogicalVolume = logicalVolumeName - - return &ctrl.Result{}, nil - } - - return nil, nil -} - -func (r *NnfNodeStorageReconciler) setLustreOwnerGroup(nodeStorage *nnfv1alpha1.NnfNodeStorage) (err error) { - log := r.Log.WithValues("NnfNodeStorage", types.NamespacedName{Name: nodeStorage.Name, Namespace: nodeStorage.Namespace}) - - _, found := os.LookupEnv("NNF_TEST_ENVIRONMENT") - if found || os.Getenv("ENVIRONMENT") == "kind" { - return nil - } - - if nodeStorage.Spec.FileSystemType != "lustre" { - return fmt.Errorf("Invalid file system type '%s' for setting owner/group", nodeStorage.Spec.FileSystemType) - } - - target := "/mnt/nnf/client/" + nodeStorage.Name - if err := os.MkdirAll(target, 0755); err != nil { - log.Error(err, "Mkdir failed") - return err - } - defer os.RemoveAll(target) - - mounter := mount.New("") - mounted, err := mounter.IsMountPoint(target) - if err != nil { - return err - } - source := nodeStorage.Spec.LustreStorage.MgsNode + ":/" + nodeStorage.Spec.LustreStorage.FileSystemName - - if !mounted { - if err := mounter.Mount(source, target, "lustre", nil); err != nil { - log.Error(err, "Mount failed") - return err + ran, err = fileSystem.Activate(ctx, allocationStatus.Ready) + if err != nil { + return nil, dwsv1alpha2.NewResourceError("could not activate file system").WithError(err).WithMajor() } - } - defer func() { - unmountErr := mounter.Unmount(target) - if err == nil { - err = unmountErr + if ran { + log.Info("Activated file system", "allocation", index) } - }() - - if err := os.Chown(target, int(nodeStorage.Spec.UserID), int(nodeStorage.Spec.GroupID)); err != nil { - log.Error(err, "Chown failed") - return err - } - - return nil -} - -func (r *NnfNodeStorageReconciler) deleteStorage(nodeStorage 
*nnfv1alpha1.NnfNodeStorage, index int) (*ctrl.Result, error) { - log := r.Log.WithValues("NnfNodeStorage", types.NamespacedName{Name: nodeStorage.Name, Namespace: nodeStorage.Namespace}) - - ss := nnf.NewDefaultStorageService() - - allocationStatus := &nodeStorage.Status.Allocations[index] - if allocationStatus.StoragePool.ID == "" { - return nil, nil - } - - log.Info("Deleting storage pool", "Id", allocationStatus.StoragePool.ID) - - err := r.deleteStoragePool(ss, allocationStatus.StoragePool.ID) - if err != nil { - ecErr, ok := err.(*ec.ControllerError) - // If the error is from a 404 error, then there's nothing to clean up and we - // assume everything has been deleted - if !ok || ecErr.StatusCode() != http.StatusNotFound { - allocationStatus.FileShare.Status = nnfv1alpha1.ResourceFailed - nodeStorage.Status.Error = dwsv1alpha2.NewResourceError("could not delete storage pool").WithError(err).WithFatal() - log.Info(nodeStorage.Status.Error.Error()) - - return &ctrl.Result{Requeue: true}, nil + ran, err = fileSystem.SetPermissions(ctx, nnfNodeStorage.Spec.UserID, nnfNodeStorage.Spec.GroupID, allocationStatus.Ready) + if err != nil { + return nil, dwsv1alpha2.NewResourceError("could not set file system permissions").WithError(err).WithMajor() } - } - - allocationStatus.StoragePool.ID = "" - allocationStatus.StorageGroup.ID = "" - allocationStatus.FileSystem.ID = "" - allocationStatus.FileShare.ID = "" - allocationStatus.StoragePool.Status = nnfv1alpha1.ResourceDeleted - allocationStatus.StorageGroup.Status = nnfv1alpha1.ResourceDeleted - allocationStatus.FileSystem.Status = nnfv1alpha1.ResourceDeleted - allocationStatus.FileShare.Status = nnfv1alpha1.ResourceDeleted - allocationStatus.VolumeGroup = "" - allocationStatus.LogicalVolume = "" - nodeStorage.Status.LustreStorage.Nid = "" - - return &ctrl.Result{}, nil -} - -func (r *NnfNodeStorageReconciler) lvmNames(ctx context.Context, nodeStorage *nnfv1alpha1.NnfNodeStorage, index int) (string, string, error) { - 
labels := nodeStorage.GetLabels() - - workflowName, ok := labels[dwsv1alpha2.WorkflowNameLabel] - if !ok { - return "", "", fmt.Errorf("missing Workflow label on NnfNodeStorage") - } - - workflowNamespace, ok := labels[dwsv1alpha2.WorkflowNamespaceLabel] - if !ok { - return "", "", fmt.Errorf("missing Workflow label on NnfNodeStorage") - } - - directiveIndex, ok := labels[nnfv1alpha1.DirectiveIndexLabel] - if !ok { - return "", "", fmt.Errorf("missing directive index label on NnfNodeStorage") - } - - workflow := &dwsv1alpha2.Workflow{ - ObjectMeta: metav1.ObjectMeta{ - Name: workflowName, - Namespace: workflowNamespace, - }, - } - if err := r.Get(ctx, client.ObjectKeyFromObject(workflow), workflow); err != nil { - return "", "", dwsv1alpha2.NewResourceError("could get workflow").WithError(err) - } - - return fmt.Sprintf("%s_%s_%d", workflow.GetUID(), directiveIndex, index), "lv", nil -} - -func (r *NnfNodeStorageReconciler) isSpecComplete(nodeStorage *nnfv1alpha1.NnfNodeStorage) bool { - if nodeStorage.Spec.FileSystemType != "lustre" { - return true - } - - if nodeStorage.Spec.LustreStorage.TargetType == "MGT" || nodeStorage.Spec.LustreStorage.TargetType == "MGTMDT" { - return true - } - - if len(nodeStorage.Spec.LustreStorage.MgsNode) > 0 { - return true - } - - return false -} - -func (r *NnfNodeStorageReconciler) createStoragePool(ss nnf.StorageServiceApi, id string, capacity int64) (*sf.StoragePoolV150StoragePool, error) { - sp := &sf.StoragePoolV150StoragePool{ - Id: id, - CapacityBytes: capacity, - Oem: openapi.MarshalOem(nnf.AllocationPolicyOem{ - Policy: nnf.SpareAllocationPolicyType, - Compliance: nnf.RelaxedAllocationComplianceType, - }), - } - - if err := ss.StorageServiceIdStoragePoolIdPut(ss.Id(), id, sp); err != nil { - resourceErr := dwsv1alpha2.NewResourceError("could not allocate storage pool").WithError(err) - ecErr, ok := err.(*ec.ControllerError) - if ok { - switch ecErr.Cause() { - case "Insufficient capacity available": - return nil, 
resourceErr.WithUserMessage("insufficient capacity available").WithWLM().WithFatal() - default: - return nil, resourceErr - } + if ran { + log.Info("Set file system permission", "allocation", index) } - return nil, resourceErr - } - - return sp, nil -} - -func (r *NnfNodeStorageReconciler) getStoragePool(ss nnf.StorageServiceApi, id string) (*sf.StoragePoolV150StoragePool, error) { - sp := &sf.StoragePoolV150StoragePool{} - - if err := ss.StorageServiceIdStoragePoolIdGet(ss.Id(), id, sp); err != nil { - return nil, err - } - - return sp, nil -} - -func (r *NnfNodeStorageReconciler) deleteStoragePool(ss nnf.StorageServiceApi, id string) error { - if err := ss.StorageServiceIdStoragePoolIdDelete(ss.Id(), id); err != nil { - return err - } - - return nil -} - -func (r *NnfNodeStorageReconciler) getEndpoint(ss nnf.StorageServiceApi, id string) (*sf.EndpointV150Endpoint, error) { - ep := &sf.EndpointV150Endpoint{} - - if err := ss.StorageServiceIdEndpointIdGet(ss.Id(), id, ep); err != nil { - return nil, err - } - - return ep, nil -} - -func (r *NnfNodeStorageReconciler) createStorageGroup(ss nnf.StorageServiceApi, id string, spID string, epID string) (*sf.StorageGroupV150StorageGroup, error) { - sp, err := r.getStoragePool(ss, spID) - if err != nil { - return nil, err - } - - ep, err := r.getEndpoint(ss, epID) - if err != nil { - return nil, err - } - - sg := &sf.StorageGroupV150StorageGroup{ - Id: id, - Links: sf.StorageGroupV150Links{ - StoragePool: sf.OdataV4IdRef{OdataId: sp.OdataId}, - ServerEndpoint: sf.OdataV4IdRef{OdataId: ep.OdataId}, - }, - } - - if err := ss.StorageServiceIdStorageGroupIdPut(ss.Id(), id, sg); err != nil { - return nil, err - } - - return sg, nil -} - -func (r *NnfNodeStorageReconciler) getStorageGroup(ss nnf.StorageServiceApi, id string) (*sf.StorageGroupV150StorageGroup, error) { - sg := &sf.StorageGroupV150StorageGroup{} - - if err := ss.StorageServiceIdStorageGroupIdGet(ss.Id(), id, sg); err != nil { - return nil, err - } - - return sg, 
nil -} - -func (r *NnfNodeStorageReconciler) deleteStorageGroup(ss nnf.StorageServiceApi, id string) error { - return ss.StorageServiceIdStorageGroupIdDelete(ss.Id(), id) -} - -func (r *NnfNodeStorageReconciler) createFileShare(ss nnf.StorageServiceApi, id string, fsID string, epID string, mountPath string, options map[string]interface{}) (*sf.FileShareV120FileShare, error) { - fs, err := r.getFileSystem(ss, fsID) - if err != nil { - return nil, err - } - - ep, err := r.getEndpoint(ss, epID) - if err != nil { - return nil, err - } - - sh := &sf.FileShareV120FileShare{ - Id: id, - FileSharePath: mountPath, - Oem: options, - Links: sf.FileShareV120Links{ - FileSystem: sf.OdataV4IdRef{OdataId: fs.OdataId}, - Endpoint: sf.OdataV4IdRef{OdataId: ep.OdataId}, - }, } - if err := ss.StorageServiceIdFileSystemIdExportedShareIdPut(ss.Id(), id, fs.Id, sh); err != nil { - return nil, err - } - - return sh, nil -} - -func (r *NnfNodeStorageReconciler) getFileShare(ss nnf.StorageServiceApi, id string, fsID string) (*sf.FileShareV120FileShare, error) { - fs, err := r.getFileSystem(ss, fsID) - if err != nil { - return nil, err - } - - sh := &sf.FileShareV120FileShare{} - - if err := ss.StorageServiceIdFileSystemIdExportedShareIdGet(ss.Id(), fs.Id, id, sh); err != nil { - return nil, err - } - - return sh, nil -} - -func (r *NnfNodeStorageReconciler) createFileSystem(ss nnf.StorageServiceApi, id string, spID string, oem nnfserver.FileSystemOem) (*sf.FileSystemV122FileSystem, error) { - sp, err := r.getStoragePool(ss, spID) - if err != nil { - return nil, err - } - - if oem.Name == "" { - oem.Name = id - } - - fs := &sf.FileSystemV122FileSystem{ - Id: id, - Links: sf.FileSystemV122Links{ - StoragePool: sf.OdataV4IdRef{OdataId: sp.OdataId}, - }, - Oem: openapi.MarshalOem(oem), - } - - if err := ss.StorageServiceIdFileSystemIdPut(ss.Id(), id, fs); err != nil { - return nil, err - } - - return fs, nil -} - -func (r *NnfNodeStorageReconciler) getFileSystem(ss nnf.StorageServiceApi, id 
string) (*sf.FileSystemV122FileSystem, error) { - fs := &sf.FileSystemV122FileSystem{} + allocationStatus.Ready = true - if err := ss.StorageServiceIdFileSystemIdGet(ss.Id(), id, fs); err != nil { - return nil, err - } - - return fs, nil + return nil, nil } // SetupWithManager sets up the controller with the Manager. func (r *NnfNodeStorageReconciler) SetupWithManager(mgr ctrl.Manager) error { // nnf-ec is not thread safe, so we are limited to a single reconcile thread. return ctrl.NewControllerManagedBy(mgr). - WithOptions(controller.Options{MaxConcurrentReconciles: 1}). For(&nnfv1alpha1.NnfNodeStorage{}). Complete(r) } diff --git a/internal/controller/nnf_node_storage_controller_test.go b/internal/controller/nnf_node_storage_controller_test.go index 8935403e7..e6796d71c 100644 --- a/internal/controller/nnf_node_storage_controller_test.go +++ b/internal/controller/nnf_node_storage_controller_test.go @@ -61,8 +61,7 @@ var _ = PDescribe("NNF Node Storage Controller Test", func() { Namespace: key.Namespace, }, Spec: nnfv1alpha1.NnfNodeStorageSpec{ - Count: 1, - Capacity: 1024 * 1024 * 1024, + Count: 1, }, } }) @@ -100,8 +99,8 @@ var _ = PDescribe("NNF Node Storage Controller Test", func() { storage.Spec.LustreStorage = nnfv1alpha1.LustreStorageSpec{ FileSystemName: "test", StartIndex: 0, - MgsNode: "test", - TargetType: "MGT", + MgsAddress: "test", + TargetType: "mgt", BackFs: "zfs", } }) diff --git a/internal/controller/nnf_persistentstorageinstance_controller.go b/internal/controller/nnf_persistentstorageinstance_controller.go index 96fe633af..c8a34b77c 100644 --- a/internal/controller/nnf_persistentstorageinstance_controller.go +++ b/internal/controller/nnf_persistentstorageinstance_controller.go @@ -199,7 +199,7 @@ func (r *PersistentStorageReconciler) Reconcile(ctx context.Context, req ctrl.Re var complete bool = true // Status section should be usable now, check for Ready for _, set := range nnfStorage.Status.AllocationSets { - if set.Status != "Ready" { + if 
set.Ready == false { complete = false } } diff --git a/internal/controller/nnf_storage_controller.go b/internal/controller/nnf_storage_controller.go index 89ba6b122..2b5137b30 100644 --- a/internal/controller/nnf_storage_controller.go +++ b/internal/controller/nnf_storage_controller.go @@ -21,10 +21,11 @@ package controller import ( "context" + "fmt" + "os" "reflect" "runtime" "strconv" - "time" "github.com/go-logr/logr" @@ -76,6 +77,7 @@ const ( //+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfstorages/status,verbs=get;update;patch //+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfstorages/finalizers,verbs=update //+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfnodestorages,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfnodeblockstorages,verbs=get;list;watch;create;update;patch;delete;deletecollection //+kubebuilder:rbac:groups=nnf.cray.hpe.com,resources=nnfstorageprofiles,verbs=get;create;list;watch;update;patch;delete;deletecollection // The Storage Controller will list and make modifications to individual NNF Nodes, so include the @@ -157,15 +159,40 @@ func (r *NnfStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) if len(storage.Status.AllocationSets) != len(storage.Spec.AllocationSets) { storage.Status.AllocationSets = make([]nnfv1alpha1.NnfStorageAllocationSetStatus, len(storage.Spec.AllocationSets)) for i := range storage.Status.AllocationSets { - storage.Status.AllocationSets[i].Status = nnfv1alpha1.ResourceStarting + storage.Status.AllocationSets[i].Ready = false } - storage.Status.Status = nnfv1alpha1.ResourceStarting + storage.Status.Ready = false return ctrl.Result{}, nil } storage.Status.Error = nil + // For each allocation, create the NnfNodeBlockStorage resources to fan out to the Rabbit nodes + for i := range storage.Spec.AllocationSets { + res, err := r.createNodeBlockStorage(ctx, storage, i) + if err != nil { + return ctrl.Result{}, 
err + } + + if res != nil { + return *res, nil + } + } + + // Collect status information from the NnfNodeBlockStorage resources and aggregate it into the + // NnfStorage + for i := range storage.Spec.AllocationSets { + res, err := r.aggregateNodeBlockStorageStatus(ctx, storage, i) + if err != nil { + return ctrl.Result{}, err + } + + if res != nil { + return *res, nil + } + } + // For each allocation, create the NnfNodeStorage resources to fan out to the Rabbit nodes for i, allocationSet := range storage.Spec.AllocationSets { // Add a reference to the external MGS PersistentStorageInstance if necessary @@ -194,25 +221,23 @@ func (r *NnfStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) } if res != nil { - return *res, nil + if *res == (ctrl.Result{}) { + continue + } else { + return *res, nil + } } } // Wait for all the allocation sets to be ready for _, allocationSet := range storage.Status.AllocationSets { - if allocationSet.Status != nnfv1alpha1.ResourceReady { - return ctrl.Result{RequeueAfter: time.Minute}, nil + if allocationSet.Ready == false { + return ctrl.Result{}, nil } } - // For Lustre, the owner and group have to be set once all the Lustre targets - // have completed. This is done on the Rabbit node that hosts OST 0. 
- for i, allocationSet := range storage.Spec.AllocationSets { - if allocationSet.TargetType != "OST" { - continue - } - - res, err := r.setLustreOwnerGroup(ctx, storage, i) + if storage.Spec.FileSystemType == "lustre" && storage.Status.Ready == false { + res, err := r.setLustreOwnerGroup(ctx, storage) if err != nil { return ctrl.Result{}, err } @@ -223,7 +248,7 @@ func (r *NnfStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) } // All allocation sets are ready and the owner/group is set - storage.Status.Status = nnfv1alpha1.ResourceReady + storage.Status.Ready = true return ctrl.Result{}, nil } @@ -291,13 +316,149 @@ func (r *NnfStorageReconciler) removePersistentStorageReference(ctx context.Cont return nil } +func (r *NnfStorageReconciler) createNodeBlockStorage(ctx context.Context, nnfStorage *nnfv1alpha1.NnfStorage, allocationSetIndex int) (*ctrl.Result, error) { + log := r.Log.WithValues("NnfStorage", client.ObjectKeyFromObject(nnfStorage)) + + allocationSet := nnfStorage.Spec.AllocationSets[allocationSetIndex] + for i, node := range allocationSet.Nodes { + // Per Rabbit namespace. + nnfNodeBlockStorage := &nnfv1alpha1.NnfNodeBlockStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: nnfNodeStorageName(nnfStorage, allocationSetIndex, i), + Namespace: node.Name, + }, + } + + result, err := ctrl.CreateOrUpdate(ctx, r.Client, nnfNodeBlockStorage, + func() error { + dwsv1alpha2.InheritParentLabels(nnfNodeBlockStorage, nnfStorage) + dwsv1alpha2.AddOwnerLabels(nnfNodeBlockStorage, nnfStorage) + + labels := nnfNodeBlockStorage.GetLabels() + labels[nnfv1alpha1.AllocationSetLabel] = allocationSet.Name + nnfNodeBlockStorage.SetLabels(labels) + + if len(nnfNodeBlockStorage.Spec.Allocations) == 0 { + nnfNodeBlockStorage.Spec.Allocations = make([]nnfv1alpha1.NnfNodeBlockStorageAllocationSpec, node.Count) + } + + if len(nnfNodeBlockStorage.Spec.Allocations) != node.Count { + return dwsv1alpha2.NewResourceError("block storage allocation count incorrect. 
found %v, expected %v", len(nnfNodeBlockStorage.Spec.Allocations), node.Count).WithFatal() + } + + for i := range nnfNodeBlockStorage.Spec.Allocations { + nnfNodeBlockStorage.Spec.Allocations[i].Capacity = allocationSet.Capacity + if len(nnfNodeBlockStorage.Spec.Allocations[i].Access) == 0 { + nnfNodeBlockStorage.Spec.Allocations[i].Access = append(nnfNodeBlockStorage.Spec.Allocations[i].Access, node.Name) + } + } + + return nil + }) + if err != nil { + if !apierrors.IsConflict(err) { + return nil, err + } + + return &ctrl.Result{Requeue: true}, nil + } + + if result == controllerutil.OperationResultCreated { + log.Info("Created NnfNodeBlockStorage", "Name", nnfNodeBlockStorage.Name, "Namespace", nnfNodeBlockStorage.Namespace) + } else if result == controllerutil.OperationResultNone { + // no change + } else { + log.Info("Updated NnfNodeBlockStorage", "Name", nnfNodeBlockStorage.Name, "Namespace", nnfNodeBlockStorage.Namespace) + } + } + + return nil, nil +} + +// Get the status from all the child NnfNodeBlockStorage resources and use them to build the status +// for the NnfStorage. 
+func (r *NnfStorageReconciler) aggregateNodeBlockStorageStatus(ctx context.Context, nnfStorage *nnfv1alpha1.NnfStorage, allocationSetIndex int) (*ctrl.Result, error) { + allocationSet := &nnfStorage.Status.AllocationSets[allocationSetIndex] + allocationSet.AllocationCount = 0 + + nnfNodeBlockStorageList := &nnfv1alpha1.NnfNodeBlockStorageList{} + matchLabels := dwsv1alpha2.MatchingOwner(nnfStorage) + matchLabels[nnfv1alpha1.AllocationSetLabel] = nnfStorage.Spec.AllocationSets[allocationSetIndex].Name + + listOptions := []client.ListOption{ + matchLabels, + } + + if err := r.List(ctx, nnfNodeBlockStorageList, listOptions...); err != nil { + return &ctrl.Result{Requeue: true}, nil + } + + // Ensure that we found all the NnfNodeStorage resources we were expecting + if len(nnfNodeBlockStorageList.Items) != len(nnfStorage.Spec.AllocationSets[allocationSetIndex].Nodes) { + return &ctrl.Result{}, nil + } + + for _, nnfNodeBlockStorage := range nnfNodeBlockStorageList.Items { + for _, nodeAllocation := range nnfNodeBlockStorage.Status.Allocations { + if nodeAllocation.CapacityAllocated > 0 { + allocationSet.AllocationCount++ + } + } + + if nnfNodeBlockStorage.Status.Error != nil { + nnfStorage.Status.SetResourceError(nnfNodeBlockStorage.Status.Error) + } + + if nnfNodeBlockStorage.Status.Ready == false { + return &ctrl.Result{}, nil + } + } + + return nil, nil +} + // Create an NnfNodeStorage if it doesn't exist, or update it if it requires updating. Each // Rabbit node gets an NnfNodeStorage, and there may be multiple allocations requested in it. // This limits the number of resources that have to be broadcast to the Rabbits. 
func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *nnfv1alpha1.NnfStorage, allocationSetIndex int) (*ctrl.Result, error) { log := r.Log.WithValues("NnfStorage", types.NamespacedName{Name: storage.Name, Namespace: storage.Namespace}) - allocationSet := storage.Spec.AllocationSets[allocationSetIndex] + mgsAddress := storage.Spec.AllocationSets[allocationSetIndex].MgsAddress + if storage.Spec.FileSystemType == "lustre" { + mgsNode := "" + for i, allocationSet := range storage.Spec.AllocationSets { + if allocationSet.TargetType == "mgt" || allocationSet.TargetType == "mgtmdt" { + // Wait for the MGT to be set up before creating nnfnodestorages for the other allocation sets + if allocationSetIndex != i { + if storage.Status.AllocationSets[i].Ready == false { + return nil, nil + } + } + + mgsNode = allocationSet.Nodes[0].Name + } + } + + if mgsNode != "" { + nnfNode := &nnfv1alpha1.NnfNode{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nnf-nlc", + Namespace: mgsNode, + }, + } + + if err := r.Get(ctx, client.ObjectKeyFromObject(nnfNode), nnfNode); err != nil { + return &ctrl.Result{}, dwsv1alpha2.NewResourceError("could not get NnfNode: %v", client.ObjectKeyFromObject(nnfNode)).WithError(err) + } + + mgsAddress = nnfNode.Status.LNetNid + } + } + + // Save the MGS address in the status section so we don't have to look in the NnfNodeStorage + storage.Status.MgsAddress = mgsAddress + + allocationSet := storage.Spec.AllocationSets[allocationSetIndex] startIndex := 0 for i, node := range allocationSet.Nodes { // Per Rabbit namespace. 
@@ -317,37 +478,24 @@ func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *n labels[nnfv1alpha1.AllocationSetLabel] = allocationSet.Name nnfNodeStorage.SetLabels(labels) + nnfNodeStorage.Spec.BlockReference = corev1.ObjectReference{ + Name: nnfNodeStorageName(storage, allocationSetIndex, i), + Namespace: node.Name, + Kind: reflect.TypeOf(nnfv1alpha1.NnfNodeBlockStorage{}).Name(), + } nnfNodeStorage.Spec.UserID = storage.Spec.UserID nnfNodeStorage.Spec.GroupID = storage.Spec.GroupID - nnfNodeStorage.Spec.Capacity = allocationSet.Capacity nnfNodeStorage.Spec.Count = node.Count nnfNodeStorage.Spec.FileSystemType = storage.Spec.FileSystemType nnfNodeStorage.Spec.LustreStorage.StartIndex = startIndex nnfNodeStorage.Spec.LustreStorage.FileSystemName = allocationSet.FileSystemName nnfNodeStorage.Spec.LustreStorage.BackFs = allocationSet.BackFs nnfNodeStorage.Spec.LustreStorage.TargetType = allocationSet.TargetType + nnfNodeStorage.Spec.LustreStorage.MgsAddress = mgsAddress // If this isn't the first allocation, then change MGTMDT to MDT so that we only get a single MGT - if allocationSet.TargetType == "MGTMDT" && startIndex != 0 { - nnfNodeStorage.Spec.LustreStorage.TargetType = "MDT" - } - - // Create the list of client endpoints for each allocation and initialize it with - // the rabbit node endpoint - if len(nnfNodeStorage.Spec.ClientEndpoints) == 0 { - nnfNodeStorage.Spec.ClientEndpoints = make([]nnfv1alpha1.ClientEndpointsSpec, node.Count) - for k := range nnfNodeStorage.Spec.ClientEndpoints { - nnfNodeStorage.Spec.ClientEndpoints[k].AllocationIndex = k - nnfNodeStorage.Spec.ClientEndpoints[k].NodeNames = append(nnfNodeStorage.Spec.ClientEndpoints[k].NodeNames, node.Name) - } - } - - if nnfNodeStorage.Spec.LustreStorage.TargetType == "MDT" || nnfNodeStorage.Spec.LustreStorage.TargetType == "OST" { - if len(allocationSet.ExternalMgsNid) > 0 { - nnfNodeStorage.Spec.LustreStorage.MgsNode = allocationSet.ExternalMgsNid - } else { - 
nnfNodeStorage.Spec.LustreStorage.MgsNode = storage.Status.MgsNode - } + if allocationSet.TargetType == "mgtmdt" && startIndex != 0 { + nnfNodeStorage.Spec.LustreStorage.TargetType = "mdt" } return nil @@ -377,12 +525,7 @@ func (r *NnfStorageReconciler) createNodeStorage(ctx context.Context, storage *n // Get the status from all the child NnfNodeStorage resources and use them to build the status // for the NnfStorage. func (r *NnfStorageReconciler) aggregateNodeStorageStatus(ctx context.Context, storage *nnfv1alpha1.NnfStorage, allocationSetIndex int) (*ctrl.Result, error) { - allocationSet := &storage.Status.AllocationSets[allocationSetIndex] - - var health nnfv1alpha1.NnfResourceHealthType = nnfv1alpha1.ResourceOkay - var status nnfv1alpha1.NnfResourceStatusType = nnfv1alpha1.ResourceReady - - allocationSet.AllocationCount = 0 + log := r.Log.WithValues("NnfStorage", types.NamespacedName{Name: storage.Name, Namespace: storage.Namespace}) nnfNodeStorageList := &nnfv1alpha1.NnfNodeStorageList{} matchLabels := dwsv1alpha2.MatchingOwner(storage) @@ -396,89 +539,168 @@ func (r *NnfStorageReconciler) aggregateNodeStorageStatus(ctx context.Context, s return &ctrl.Result{Requeue: true}, nil } - // Ensure that we found all the NnfNodeStorage resources we were expecting - if len(nnfNodeStorageList.Items) != len(storage.Spec.AllocationSets[allocationSetIndex].Nodes) { - status = nnfv1alpha1.ResourceStarting - } - for _, nnfNodeStorage := range nnfNodeStorageList.Items { - if nnfNodeStorage.Spec.LustreStorage.TargetType == "MGT" || nnfNodeStorage.Spec.LustreStorage.TargetType == "MGTMDT" { - storage.Status.MgsNode = nnfNodeStorage.Status.LustreStorage.Nid + if nnfNodeStorage.Status.Error != nil { + storage.Status.SetResourceError(nnfNodeStorage.Status.Error) } - // Wait until the status section of the nnfNodeStorage has been initialized - if len(nnfNodeStorage.Status.Allocations) != nnfNodeStorage.Spec.Count { - // Set the Status to starting unless we've found a failure in 
one - // of the earlier nnfNodeStorages - startingStatus := nnfv1alpha1.ResourceStarting - startingStatus.UpdateIfWorseThan(&status) - allocationSet.Status = status - allocationSet.Health = health - + if nnfNodeStorage.Status.Ready == false { return &ctrl.Result{}, nil } + } - for _, nodeAllocation := range nnfNodeStorage.Status.Allocations { - if nodeAllocation.CapacityAllocated > 0 { - allocationSet.AllocationCount++ - } + // Ensure that we found all the NnfNodeStorage resources we were expecting + if len(nnfNodeStorageList.Items) != len(storage.Spec.AllocationSets[allocationSetIndex].Nodes) { + log.Info("Bad count", "found", len(nnfNodeStorageList.Items), "expected", len(storage.Spec.AllocationSets[allocationSetIndex].Nodes)) + return &ctrl.Result{}, nil + } - nodeAllocation.StoragePool.Health.UpdateIfWorseThan(&health) - nodeAllocation.StorageGroup.Health.UpdateIfWorseThan(&health) - nodeAllocation.FileSystem.Health.UpdateIfWorseThan(&health) - nodeAllocation.FileShare.Health.UpdateIfWorseThan(&health) + storage.Status.AllocationSets[allocationSetIndex].Ready = true - nodeAllocation.StoragePool.Status.UpdateIfWorseThan(&status) - nodeAllocation.StorageGroup.Status.UpdateIfWorseThan(&status) - nodeAllocation.FileSystem.Status.UpdateIfWorseThan(&status) - nodeAllocation.FileShare.Status.UpdateIfWorseThan(&status) - } + return nil, nil +} - if nnfNodeStorage.Status.Error != nil { - storage.Status.SetResourceError(nnfNodeStorage.Status.Error) - } +func (r *NnfStorageReconciler) setLustreOwnerGroup(ctx context.Context, nnfStorage *nnfv1alpha1.NnfStorage) (*ctrl.Result, error) { + log := r.Log.WithValues("NnfStorage", client.ObjectKeyFromObject(nnfStorage)) + + // Don't create the clientmount in the test environment. Some tests don't fake out the + // NnfStorage enough to have it be successful. 
+ if _, found := os.LookupEnv("NNF_TEST_ENVIRONMENT"); found { + return nil, nil } - allocationSet.Health = health - allocationSet.Status = status + if nnfStorage.Spec.FileSystemType != "lustre" { + return &ctrl.Result{}, dwsv1alpha2.NewResourceError("invalid file system type '%s' for setLustreOwnerGroup", nnfStorage.Spec.FileSystemType).WithFatal() + } - return nil, nil -} + index := func() int { + for i, allocationSet := range nnfStorage.Spec.AllocationSets { + if allocationSet.Name == "ost" { + return i + } + } + return -1 + }() -// setLustreOwnerGroup sets the "SetOwnerGroup" field in the NnfNodeStorage for OST 0 in a Lustre -// file system. This tells the node controller on the Rabbit to mount the Lustre file system and set -// the owner and group. -func (r *NnfStorageReconciler) setLustreOwnerGroup(ctx context.Context, storage *nnfv1alpha1.NnfStorage, allocationSetIndex int) (*ctrl.Result, error) { - allocationSet := storage.Spec.AllocationSets[allocationSetIndex] + if index == -1 { + return &ctrl.Result{}, dwsv1alpha2.NewResourceError("no ost allocation set").WithFatal() + } + allocationSet := nnfStorage.Spec.AllocationSets[index] if len(allocationSet.Nodes) == 0 { - return nil, nil + return &ctrl.Result{}, dwsv1alpha2.NewResourceError("zero length node array for OST").WithFatal() + } + + tempMountDir := os.Getenv("NNF_TEMP_MOUNT_PATH") + if len(tempMountDir) == 0 { + tempMountDir = "/mnt/tmp/" } - nnfNodeStorage := &nnfv1alpha1.NnfNodeStorage{ + clientMount := &dwsv1alpha2.ClientMount{ ObjectMeta: metav1.ObjectMeta{ - Name: nnfNodeStorageName(storage, allocationSetIndex, 0), + Name: fmt.Sprintf("%s-ownergroup", nnfStorage.Name), Namespace: allocationSet.Nodes[0].Name, }, } - if err := r.Get(ctx, client.ObjectKeyFromObject(nnfNodeStorage), nnfNodeStorage); err != nil { - return nil, err - } + if err := r.Get(ctx, client.ObjectKeyFromObject(clientMount), clientMount); err != nil { + if !apierrors.IsNotFound(err) { + return &ctrl.Result{}, 
dwsv1alpha2.NewResourceError("could not get clientmount for setting lustre owner/group").WithError(err).WithMajor() + } + index := func() int { + for i, allocationSet := range nnfStorage.Spec.AllocationSets { + if allocationSet.Name == "ost" { + return i + } + } + return -1 + }() + + if index == -1 { + return &ctrl.Result{}, dwsv1alpha2.NewResourceError("no ost allocation set").WithFatal() + } - if !nnfNodeStorage.Spec.SetOwnerGroup { - nnfNodeStorage.Spec.SetOwnerGroup = true + allocationSet := nnfStorage.Spec.AllocationSets[index] + if len(allocationSet.Nodes) == 0 { + return &ctrl.Result{}, dwsv1alpha2.NewResourceError("zero length node array for OST").WithFatal() + } - if err := r.Update(ctx, nnfNodeStorage); err != nil { - return nil, err + tempMountDir := os.Getenv("NNF_TEMP_MOUNT_PATH") + if len(tempMountDir) == 0 { + tempMountDir = "/mnt/tmp/" } + + dwsv1alpha2.InheritParentLabels(clientMount, nnfStorage) + dwsv1alpha2.AddOwnerLabels(clientMount, nnfStorage) + + clientMount.Spec.Node = allocationSet.Nodes[0].Name + clientMount.Spec.DesiredState = dwsv1alpha2.ClientMountStateMounted + clientMount.Spec.Mounts = []dwsv1alpha2.ClientMountInfo{ + dwsv1alpha2.ClientMountInfo{ + Type: nnfStorage.Spec.FileSystemType, + TargetType: "directory", + MountPath: fmt.Sprintf("/%s/%s", tempMountDir, nnfNodeStorageName(nnfStorage, index, 0)), + Device: dwsv1alpha2.ClientMountDevice{ + Type: dwsv1alpha2.ClientMountDeviceTypeLustre, + Lustre: &dwsv1alpha2.ClientMountDeviceLustre{ + FileSystemName: allocationSet.FileSystemName, + MgsAddresses: nnfStorage.Status.MgsAddress, + }, + DeviceReference: &dwsv1alpha2.ClientMountDeviceReference{ + ObjectReference: corev1.ObjectReference{ + Name: nnfNodeStorageName(nnfStorage, index, 0), + Namespace: allocationSet.Nodes[0].Name, + }, + }, + }, + + UserID: nnfStorage.Spec.UserID, + GroupID: nnfStorage.Spec.GroupID, + SetPermissions: true, + }, + } + + if err := r.Create(ctx, clientMount); err != nil { + return &ctrl.Result{}, 
dwsv1alpha2.NewResourceError("could not create lustre owner/group ClientMount resource").WithError(err).WithMajor() + } + + log.Info("Created clientMount for setting Lustre owner/group") + + return &ctrl.Result{}, nil } - if nnfNodeStorage.Status.OwnerGroupStatus != nnfv1alpha1.ResourceReady { + if clientMount.Status.Error != nil { + nnfStorage.Status.SetResourceError(clientMount.Status.Error) + } + + switch clientMount.Status.Mounts[0].State { + case dwsv1alpha2.ClientMountStateMounted: + if clientMount.Status.Mounts[0].Ready == false { + return &ctrl.Result{}, nil + } + + clientMount.Spec.DesiredState = dwsv1alpha2.ClientMountStateUnmounted + if err := r.Update(ctx, clientMount); err != nil { + if !apierrors.IsConflict(err) { + return &ctrl.Result{}, err + } + + return &ctrl.Result{Requeue: true}, nil + } + + log.Info("Updated clientMount to unmount Lustre owner/group mount") + return &ctrl.Result{}, nil + case dwsv1alpha2.ClientMountStateUnmounted: + if clientMount.Status.Mounts[0].Ready == false { + return &ctrl.Result{}, nil + } + + // The ClientMount successfully unmounted. It will be deleted when the NnfStorage is deleted + return nil, nil } - return nil, nil + return &ctrl.Result{}, nil } // Delete all the child NnfNodeStorage resources. Don't trust the client cache @@ -526,7 +748,9 @@ func nnfNodeStorageName(storage *nnfv1alpha1.NnfStorage, allocationSetIndex int, // SetupWithManager sets up the controller with the Manager. func (r *NnfStorageReconciler) SetupWithManager(mgr ctrl.Manager) error { r.ChildObjects = []dwsv1alpha2.ObjectList{ + &dwsv1alpha2.ClientMountList{}, &nnfv1alpha1.NnfNodeStorageList{}, + &nnfv1alpha1.NnfNodeBlockStorageList{}, &nnfv1alpha1.NnfStorageProfileList{}, } @@ -535,5 +759,7 @@ func (r *NnfStorageReconciler) SetupWithManager(mgr ctrl.Manager) error { WithOptions(controller.Options{MaxConcurrentReconciles: maxReconciles}). For(&nnfv1alpha1.NnfStorage{}). 
Watches(&nnfv1alpha1.NnfNodeStorage{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). + Watches(&nnfv1alpha1.NnfNodeBlockStorage{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). + Watches(&dwsv1alpha2.ClientMount{}, handler.EnqueueRequestsFromMapFunc(dwsv1alpha2.OwnerLabelMapFunc)). Complete(r) } diff --git a/internal/controller/nnf_workflow_controller.go b/internal/controller/nnf_workflow_controller.go index 0f0f1e9c3..d056e9f8b 100644 --- a/internal/controller/nnf_workflow_controller.go +++ b/internal/controller/nnf_workflow_controller.go @@ -37,6 +37,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -459,7 +460,7 @@ func (r *NnfWorkflowReconciler) finishSetupState(ctx context.Context, workflow * return Requeue("error").withObject(nnfStorage), nil } - if nnfStorage.Status.Status != nnfv1alpha1.ResourceReady { + if nnfStorage.Status.Ready == false { // RequeueAfter is necessary for persistent storage that isn't owned by this workflow return Requeue("allocation set not ready").after(2 * time.Second).withObject(nnfStorage), nil } @@ -807,13 +808,6 @@ func (r *NnfWorkflowReconciler) startPreRunState(ctx context.Context, workflow * return unmountResult, nil } - access := &nnfv1alpha1.NnfAccess{ - ObjectMeta: metav1.ObjectMeta{ - Name: indexedResourceName(workflow, index) + "-computes", - Namespace: workflow.Namespace, - }, - } - // Create container service and jobs if dwArgs["command"] == "container" { result, err := r.userContainerHandler(ctx, workflow, dwArgs, index, log) @@ -828,11 +822,25 @@ func (r *NnfWorkflowReconciler) startPreRunState(ctx context.Context, workflow * return nil, nil } + pinnedName, pinnedNamespace := 
getStorageReferenceNameFromWorkflowActual(workflow, index) + nnfStorageProfile, err := findPinnedProfile(ctx, r.Client, pinnedNamespace, pinnedName) + if err != nil { + return nil, dwsv1alpha2.NewResourceError("could not find pinned NnfStorageProfile: %v", types.NamespacedName{Name: pinnedName, Namespace: pinnedNamespace}).WithError(err).WithFatal() + } + + access := &nnfv1alpha1.NnfAccess{ + ObjectMeta: metav1.ObjectMeta{ + Name: indexedResourceName(workflow, index) + "-computes", + Namespace: workflow.Namespace, + }, + } + // Create an NNFAccess for the compute clients result, err := ctrl.CreateOrUpdate(ctx, r.Client, access, func() error { dwsv1alpha2.AddWorkflowLabels(access, workflow) dwsv1alpha2.AddOwnerLabels(access, workflow) + addPinnedStorageProfileLabel(access, nnfStorageProfile) addDirectiveIndexLabel(access, index) access.Spec.TeardownState = dwsv1alpha2.StatePostRun @@ -840,7 +848,7 @@ func (r *NnfWorkflowReconciler) startPreRunState(ctx context.Context, workflow * access.Spec.UserID = workflow.Spec.UserID access.Spec.GroupID = workflow.Spec.GroupID access.Spec.Target = "single" - access.Spec.MountPath = buildMountPath(workflow, index) + access.Spec.MountPath = buildComputeMountPath(workflow, index) access.Spec.ClientReference = corev1.ObjectReference{ Name: workflow.Name, Namespace: workflow.Namespace, @@ -930,7 +938,7 @@ func (r *NnfWorkflowReconciler) finishPreRunState(ctx context.Context, workflow return nil, dwsv1alpha2.NewResourceError("unexpected directive: %v", dwArgs["command"]).WithFatal().WithUserMessage("could not mount file system on compute nodes") } - workflow.Status.Env[envName] = buildMountPath(workflow, index) + workflow.Status.Env[envName] = buildComputeMountPath(workflow, index) // Containers do not have NNFAccesses, so only do this after r.waitForContainersToStart() would have returned result, err := r.waitForNnfAccessStateAndReady(ctx, workflow, index, "mounted") diff --git a/internal/controller/nnf_workflow_controller_helpers.go 
b/internal/controller/nnf_workflow_controller_helpers.go index 4a07fdcb3..e135ef008 100644 --- a/internal/controller/nnf_workflow_controller_helpers.go +++ b/internal/controller/nnf_workflow_controller_helpers.go @@ -605,7 +605,7 @@ func (r *NnfWorkflowReconciler) createNnfStorage(ctx context.Context, workflow * // Copy the existing PersistentStorageInstance data if present to prevent picking a different // MGS for _, allocationSet := range nnfStorage.Spec.AllocationSets { - mgsNid = allocationSet.NnfStorageLustreSpec.ExternalMgsNid + mgsNid = allocationSet.NnfStorageLustreSpec.MgsAddress persistentMgsReference = allocationSet.NnfStorageLustreSpec.PersistentMgsReference break } @@ -632,11 +632,11 @@ func (r *NnfWorkflowReconciler) createNnfStorage(ctx context.Context, workflow * nnfAllocSet.Name = s.Spec.AllocationSets[i].Label nnfAllocSet.Capacity = s.Spec.AllocationSets[i].AllocationSize if dwArgs["type"] == "lustre" { - nnfAllocSet.NnfStorageLustreSpec.TargetType = strings.ToUpper(s.Spec.AllocationSets[i].Label) + nnfAllocSet.NnfStorageLustreSpec.TargetType = s.Spec.AllocationSets[i].Label nnfAllocSet.NnfStorageLustreSpec.BackFs = "zfs" nnfAllocSet.NnfStorageLustreSpec.FileSystemName = "z" + string(s.GetUID())[:7] if len(mgsNid) > 0 { - nnfAllocSet.NnfStorageLustreSpec.ExternalMgsNid = mgsNid + nnfAllocSet.NnfStorageLustreSpec.MgsAddress = mgsNid nnfAllocSet.NnfStorageLustreSpec.PersistentMgsReference = persistentMgsReference } } @@ -709,7 +709,7 @@ func (r *NnfWorkflowReconciler) getLustreMgsFromPool(ctx context.Context, pool s return corev1.ObjectReference{}, "", dwsv1alpha2.NewResourceError("unexpected number of allocation sets '%d' for persistent MGS", len(nnfStorage.Spec.AllocationSets)).WithFatal() } - if len(nnfStorage.Status.MgsNode) == 0 { + if len(nnfStorage.Status.MgsAddress) == 0 { return corev1.ObjectReference{}, "", dwsv1alpha2.NewResourceError("no LNid listed for persistent MGS").WithFatal() } @@ -717,7 +717,7 @@ func (r *NnfWorkflowReconciler) 
getLustreMgsFromPool(ctx context.Context, pool s Kind: reflect.TypeOf(dwsv1alpha2.PersistentStorageInstance{}).Name(), Name: persistentStorage.Name, Namespace: persistentStorage.Namespace, - }, nnfStorage.Status.MgsNode, nil + }, nnfStorage.Status.MgsAddress, nil } func (r *NnfWorkflowReconciler) findLustreFileSystemForPath(ctx context.Context, path string, log logr.Logger) *lusv1beta1.LustreFileSystem { @@ -737,6 +737,12 @@ func (r *NnfWorkflowReconciler) findLustreFileSystemForPath(ctx context.Context, } func (r *NnfWorkflowReconciler) setupNnfAccessForServers(ctx context.Context, storage *nnfv1alpha1.NnfStorage, workflow *dwsv1alpha2.Workflow, index int, parentDwIndex int, teardownState dwsv1alpha2.WorkflowState, log logr.Logger) (*nnfv1alpha1.NnfAccess, error) { + pinnedName, pinnedNamespace := getStorageReferenceNameFromWorkflowActual(workflow, parentDwIndex) + nnfStorageProfile, err := findPinnedProfile(ctx, r.Client, pinnedNamespace, pinnedName) + if err != nil { + return nil, dwsv1alpha2.NewResourceError("could not find pinned NnfStorageProfile: %v", types.NamespacedName{Name: pinnedName, Namespace: pinnedNamespace}).WithError(err).WithFatal() + } + access := &nnfv1alpha1.NnfAccess{ ObjectMeta: metav1.ObjectMeta{ Name: indexedResourceName(workflow, parentDwIndex) + "-servers", @@ -748,8 +754,9 @@ func (r *NnfWorkflowReconciler) setupNnfAccessForServers(ctx context.Context, st func() error { dwsv1alpha2.AddWorkflowLabels(access, workflow) dwsv1alpha2.AddOwnerLabels(access, workflow) - nnfv1alpha1.AddDataMovementTeardownStateLabel(access, teardownState) + addPinnedStorageProfileLabel(access, nnfStorageProfile) addDirectiveIndexLabel(access, index) + nnfv1alpha1.AddDataMovementTeardownStateLabel(access, teardownState) access.Spec = nnfv1alpha1.NnfAccessSpec{ DesiredState: "mounted", @@ -757,8 +764,8 @@ func (r *NnfWorkflowReconciler) setupNnfAccessForServers(ctx context.Context, st Target: "all", UserID: workflow.Spec.UserID, GroupID: workflow.Spec.GroupID, - 
MountPath: buildMountPath(workflow, parentDwIndex), - MountPathPrefix: buildMountPath(workflow, parentDwIndex), + MountPath: buildServerMountPath(workflow, parentDwIndex), + MountPathPrefix: buildServerMountPath(workflow, parentDwIndex), // NNF Storage is Namespaced Name to the servers object StorageReference: corev1.ObjectReference{ @@ -808,8 +815,20 @@ func (r *NnfWorkflowReconciler) getDirectiveFileSystemType(ctx context.Context, } } -func buildMountPath(workflow *dwsv1alpha2.Workflow, index int) string { - return fmt.Sprintf("/mnt/nnf/%s-%d", workflow.UID, index) +func buildComputeMountPath(workflow *dwsv1alpha2.Workflow, index int) string { + prefix := os.Getenv("COMPUTE_MOUNT_PREFIX") + if len(prefix) == 0 { + prefix = "/mnt/nnf" + } + return fmt.Sprintf("/%s/%s-%d", prefix, workflow.UID, index) +} + +func buildServerMountPath(workflow *dwsv1alpha2.Workflow, index int) string { + prefix := os.Getenv("SERVER_MOUNT_PREFIX") + if len(prefix) == 0 { + prefix = "/mnt/nnf" + } + return fmt.Sprintf("/%s/%s-%d", prefix, workflow.UID, index) } func (r *NnfWorkflowReconciler) findPersistentInstance(ctx context.Context, wf *dwsv1alpha2.Workflow, psiName string) (*dwsv1alpha2.PersistentStorageInstance, error) { diff --git a/internal/controller/nnf_workflow_controller_test.go b/internal/controller/nnf_workflow_controller_test.go index e4c206dc3..e50f0e41e 100644 --- a/internal/controller/nnf_workflow_controller_test.go +++ b/internal/controller/nnf_workflow_controller_test.go @@ -25,6 +25,7 @@ import ( "os" "reflect" "strings" + "sync" "github.com/google/uuid" . 
"github.com/onsi/ginkgo/v2" @@ -56,11 +57,39 @@ var _ = Describe("NNF Workflow Unit Tests", func() { var ( key types.NamespacedName workflow *dwsv1alpha2.Workflow + setup sync.Once storageProfile *nnfv1alpha1.NnfStorageProfile + nnfNode *nnfv1alpha1.NnfNode + namespace *corev1.Namespace persistentStorageName string ) BeforeEach(func() { + setup.Do(func() { + namespace = &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ + Name: "rabbit-node", + }} + + Expect(k8sClient.Create(context.TODO(), namespace)).To(Succeed()) + + nnfNode = &nnfv1alpha1.NnfNode{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "nnf-nlc", + Namespace: "rabbit-node", + }, + Spec: nnfv1alpha1.NnfNodeSpec{ + State: nnfv1alpha1.ResourceEnable, + }, + } + Expect(k8sClient.Create(context.TODO(), nnfNode)).To(Succeed()) + + Eventually(func(g Gomega) error { + g.Expect(k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(nnfNode), nnfNode)).To(Succeed()) + nnfNode.Status.LNetNid = "1.2.3.4@tcp0" + return k8sClient.Update(context.TODO(), nnfNode) + }).Should(Succeed(), "set LNet Nid in NnfNode") + }) wfid := uuid.NewString()[0:8] persistentStorageName = "persistent-" + uuid.NewString()[:8] @@ -116,7 +145,6 @@ var _ = Describe("NNF Workflow Unit Tests", func() { Eventually(func() error { // Delete can still return the cached object. 
Wait until the object is no longer present return k8sClient.Get(context.TODO(), client.ObjectKeyFromObject(storageProfile), profExpected) }).ShouldNot(Succeed()) - }) getErroredDriverStatus := func(workflow *dwsv1alpha2.Workflow) *dwsv1alpha2.WorkflowDriverStatus { @@ -506,7 +534,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { "Namespace": Equal(lustre.Namespace), })) - Expect(dm.Spec.Destination.Path).To(Equal(buildMountPath(workflow, 0) + "/my-file.out")) + Expect(dm.Spec.Destination.Path).To(Equal(buildComputeMountPath(workflow, 0) + "/my-file.out")) Expect(dm.Spec.Destination.StorageReference).ToNot(BeNil()) Expect(dm.Spec.Destination.StorageReference).To(MatchFields(IgnoreExtras, Fields{ @@ -575,7 +603,7 @@ var _ = Describe("NNF Workflow Unit Tests", func() { "Namespace": Equal(lustre.Namespace), })) - Expect(dm.Spec.Destination.Path).To(Equal(buildMountPath(workflow, 0) + "/my-persistent-file.out")) + Expect(dm.Spec.Destination.Path).To(Equal(buildComputeMountPath(workflow, 0) + "/my-persistent-file.out")) Expect(dm.Spec.Destination.StorageReference).ToNot(BeNil()) Expect(dm.Spec.Destination.StorageReference).To(MatchFields(IgnoreExtras, Fields{ diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 8a5b01562..7360aeaca 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -246,6 +246,13 @@ var _ = BeforeSuite(func() { }).SetupWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) + err = (&NnfNodeBlockStorageReconciler{ + Client: k8sManager.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("NnfNodeBlockStorage"), + Scheme: testEnv.Scheme, + }).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + err = (&NnfStorageReconciler{ Client: k8sManager.GetClient(), Log: ctrl.Log.WithName("controllers").WithName("NnfStorage"), diff --git a/mount-daemon/main.go b/mount-daemon/main.go new file mode 100644 index 000000000..71b6670e3 --- /dev/null +++ 
b/mount-daemon/main.go @@ -0,0 +1,291 @@ +/* + * Copyright 2021-2023 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "flag" + "fmt" + "net" + "os" + "os/signal" + "runtime" + "strings" + "syscall" + "time" + + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. 
+ "github.com/takama/daemon" + corev1 "k8s.io/api/core/v1" + _ "k8s.io/client-go/plugin/pkg/client/auth" + "k8s.io/client-go/rest" + + kruntime "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + certutil "k8s.io/client-go/util/cert" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + dwsv1alpha2 "github.com/DataWorkflowServices/dws/api/v1alpha2" + nnfv1alpha1 "github.com/NearNodeFlash/nnf-sos/api/v1alpha1" + controllers "github.com/NearNodeFlash/nnf-sos/internal/controller" + "github.com/NearNodeFlash/nnf-sos/mount-daemon/version" + //+kubebuilder:scaffold:imports +) + +const ( + name = "clientmountd" + description = "Data Workflow Service (DWS) Client Mount Service" +) + +var ( + scheme = kruntime.NewScheme() + setupLog = ctrl.Log.WithName("setup") +) + +type Service struct { + daemon.Daemon +} + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(dwsv1alpha2.AddToScheme(scheme)) + utilruntime.Must(nnfv1alpha1.AddToScheme(scheme)) + //+kubebuilder:scaffold:scheme +} + +func (service *Service) Manage() (string, error) { + + if len(os.Args) > 1 { + command := os.Args[1] + switch command { + case "install": + return service.Install(os.Args[2:]...) + case "remove": + return service.Remove() + case "start": + return service.Start() + case "stop": + return service.Stop() + case "status": + return service.Status() + } + } + + opts := getOptions() + + setupLog.Info("Client Mount Daemon", "Version", version.BuildVersion()) + + config, err := createManager(opts) + if err != nil { + return "Create", err + } + + // Set up channel on which to send signal notifications; must use a buffered + // channel or risk missing the signal if we're not setup to receive the signal + // when it is sent. 
+ interrupt := make(chan os.Signal, 1) + signal.Notify(interrupt, os.Interrupt, syscall.SIGTERM) + + go startManager(config) + + killSignal := <-interrupt + setupLog.Info("Daemon was killed", "signal", killSignal) + return "Exited", nil +} + +type managerConfig struct { + config *rest.Config + namespace string + mock bool + timeout time.Duration +} + +type options struct { + host string + port string + name string + tokenFile string + certFile string + mock bool + timeout time.Duration +} + +func getOptions() *options { + opts := options{ + host: os.Getenv("KUBERNETES_SERVICE_HOST"), + port: os.Getenv("KUBERNETES_SERVICE_PORT"), + name: os.Getenv("NODE_NAME"), + tokenFile: os.Getenv("DWS_CLIENT_MOUNT_SERVICE_TOKEN_FILE"), + certFile: os.Getenv("DWS_CLIENT_MOUNT_SERVICE_CERT_FILE"), + mock: false, + timeout: time.Minute, + } + + flag.StringVar(&opts.host, "kubernetes-service-host", opts.host, "Kubernetes service host address") + flag.StringVar(&opts.port, "kubernetes-service-port", opts.port, "Kubernetes service port number") + flag.StringVar(&opts.name, "node-name", opts.name, "Name of this compute resource") + flag.StringVar(&opts.tokenFile, "service-token-file", opts.tokenFile, "Path to the DWS client mount service token") + flag.StringVar(&opts.certFile, "service-cert-file", opts.certFile, "Path to the DWS client mount service certificate") + flag.BoolVar(&opts.mock, "mock", opts.mock, "Run in mock mode where no client mount operations take place") + flag.DurationVar(&opts.timeout, "command-timeout", opts.timeout, "Timeout value before subcommands are killed") + + zapOptions := zap.Options{ + Development: true, + } + zapOptions.BindFlags(flag.CommandLine) + + flag.Parse() + + ctrl.SetLogger(zap.New(zap.UseFlagOptions(&zapOptions))) + + return &opts +} + +func createManager(opts *options) (*managerConfig, error) { + + var config *rest.Config + var err error + + if len(opts.name) == 0 { + longName, err := os.Hostname() + if err != nil { + return nil, err + } + 
parts := strings.Split(longName, ".") + opts.name = parts[0] + setupLog.Info("Using system hostname", "name", opts.name) + } + + if len(opts.host) == 0 && len(opts.port) == 0 { + setupLog.Info("Using kubeconfig rest configuration") + + config, err = ctrl.GetConfig() + if err != nil { + return nil, err + } + + } else { + setupLog.Info("Using default rest configuration") + + if len(opts.host) == 0 || len(opts.port) == 0 { + return nil, fmt.Errorf("kubernetes service host/port not defined") + } + + if len(opts.tokenFile) == 0 { + return nil, fmt.Errorf("DWS client mount service token not defined") + } + + token, err := os.ReadFile(opts.tokenFile) + if err != nil { + return nil, fmt.Errorf("DWS client mount service token failed to read") + } + + if len(opts.certFile) == 0 { + return nil, fmt.Errorf("DWS client mount service certificate file not defined") + } + + if _, err := certutil.NewPool(opts.certFile); err != nil { + return nil, fmt.Errorf("DWS client mount service certificate invalid") + } + + tlsClientConfig := rest.TLSClientConfig{} + tlsClientConfig.CAFile = opts.certFile + + config = &rest.Config{ + Host: "https://" + net.JoinHostPort(opts.host, opts.port), + TLSClientConfig: tlsClientConfig, + BearerToken: string(token), + BearerTokenFile: opts.tokenFile, + } + } + + return &managerConfig{config: config, namespace: opts.name, mock: opts.mock, timeout: opts.timeout}, nil +} + +func startManager(config *managerConfig) { + setupLog.Info("GOMAXPROCS", "value", runtime.GOMAXPROCS(0)) + + namespaceCache := make(map[string]cache.Config) + namespaceCache[config.namespace] = cache.Config{} + namespaceCache[corev1.NamespaceDefault] = cache.Config{} + + mgr, err := ctrl.NewManager(config.config, ctrl.Options{ + Scheme: scheme, + LeaderElection: false, + Cache: cache.Options{DefaultNamespaces: namespaceCache}, + }) + if err != nil { + setupLog.Error(err, "unable to start manager") + os.Exit(1) + } + + if err = (&controllers.NnfClientMountReconciler{ + Client: 
mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("ClientMount"), + // Mock: config.mock, + // Timeout: config.timeout, + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "ClientMount") + os.Exit(1) + } + + //+kubebuilder:scaffold:builder + + setupLog.Info("starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + setupLog.Error(err, "problem running manager") + os.Exit(1) + } +} + +func main() { + + if len(os.Args) > 1 && os.Args[1] == "version" { + fmt.Println("Version", version.BuildVersion()) + os.Exit(0) + } + + kindFn := func() daemon.Kind { + if runtime.GOOS == "darwin" { + return daemon.UserAgent + } + return daemon.SystemDaemon + } + + d, err := daemon.New(name, description, kindFn(), "network-online.target") + if err != nil { + setupLog.Error(err, "Could not create daemon") + os.Exit(1) + } + + service := &Service{d} + + status, err := service.Manage() + if err != nil { + setupLog.Error(err, status) + os.Exit(1) + } + + fmt.Println(status) +} diff --git a/mount-daemon/version/version.go b/mount-daemon/version/version.go new file mode 100644 index 000000000..1dc0d0b7b --- /dev/null +++ b/mount-daemon/version/version.go @@ -0,0 +1,30 @@ +/* + * Copyright 2023 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package version + +// The current clientmountd version string. +var ( + // Version contains the current version of the clientmount daemon. + // This is a version tag. If there are commits past that tag, then + // a count is appended with a short git hash of the latest commit. + version = "v0.0.0" +) + +func BuildVersion() string { return version } diff --git a/pkg/blockdevice/blockdevice.go b/pkg/blockdevice/blockdevice.go new file mode 100644 index 000000000..5d28a29c2 --- /dev/null +++ b/pkg/blockdevice/blockdevice.go @@ -0,0 +1,42 @@ +/* + * Copyright 2023 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package blockdevice + +import "context" + +type BlockDevice interface { + // Create the block device (e.g., LVM pvcreate, vgcreate, and lvcreate) + Create(ctx context.Context, complete bool) (bool, error) + + // Destroy the block device (e.g., LVM pvremove, vgremove, and lvremove) + Destroy(ctx context.Context) (bool, error) + + // Activate the block device (e.g., LVM lockstart and lvchange --activate y) + Activate(ctx context.Context) (bool, error) + + // Deactivate the block device (e.g., LVM lockstop and lvchange --activate n) + Deactivate(ctx context.Context) (bool, error) + + // Get device /dev path + GetDevice() string + + // Check if the block device has already been formatted for a file system + CheckFormatted() bool +} diff --git a/pkg/blockdevice/lvm.go b/pkg/blockdevice/lvm.go new file mode 100644 index 000000000..a32df81a3 --- /dev/null +++ b/pkg/blockdevice/lvm.go @@ -0,0 +1,217 @@ +/* + * Copyright 2023 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package blockdevice + +import ( + "context" + "fmt" + "strings" + + "github.com/NearNodeFlash/nnf-sos/pkg/blockdevice/lvm" + "github.com/NearNodeFlash/nnf-sos/pkg/command" + "github.com/go-logr/logr" +) + +type LvmPvCommandArgs struct { + Create string + Remove string +} + +type LvmVgCommandArgs struct { + Create string + Remove string + LockStart string + LockStop string +} + +type LvmLvCommandArgs struct { + Create string + Remove string + Activate string + Deactivate string +} + +type LvmCommandArgs struct { + PvArgs LvmPvCommandArgs + VgArgs LvmVgCommandArgs + LvArgs LvmLvCommandArgs +} + +type Lvm struct { + Log logr.Logger + CommandArgs LvmCommandArgs + + PhysicalVolumes []*lvm.PhysicalVolume + VolumeGroup *lvm.VolumeGroup + LogicalVolume *lvm.LogicalVolume +} + +// Check that Lvm implements the BlockDevice interface +var _ BlockDevice = &Lvm{} + +func (l *Lvm) Create(ctx context.Context, complete bool) (bool, error) { + if complete { + return false, nil + } + + objectCreated := false + + for _, pv := range l.PhysicalVolumes { + created, err := pv.Create(ctx, l.CommandArgs.PvArgs.Create) + if err != nil { + return false, err + } + if created { + objectCreated = true + } + } + + created, err := l.VolumeGroup.Create(ctx, l.CommandArgs.VgArgs.Create) + if err != nil { + return false, err + } + if created { + objectCreated = true + } + + if len(l.CommandArgs.VgArgs.LockStart) > 0 { + created, err := l.VolumeGroup.LockStart(ctx, l.CommandArgs.VgArgs.LockStart) + if err != nil { + return false, err + } + if created { + objectCreated = true + } + } + + created, err = l.LogicalVolume.Create(ctx, l.CommandArgs.LvArgs.Create) + if err != nil { + return false, err + } + if created { + objectCreated = true + } + + return objectCreated, nil +} + +func (l *Lvm) Destroy(ctx context.Context) (bool, error) { + objectDestroyed := false + + vgExists, err := l.VolumeGroup.Exists(ctx) + if err != nil { + return false, err + } + + if vgExists && 
len(l.CommandArgs.VgArgs.LockStart) > 0 { + destroyed, err := l.VolumeGroup.LockStart(ctx, l.CommandArgs.VgArgs.LockStart) + if err != nil { + return false, err + } + if destroyed { + objectDestroyed = true + } + } + + destroyed, err := l.LogicalVolume.Remove(ctx, l.CommandArgs.LvArgs.Remove) + if err != nil { + return false, err + + } + if destroyed { + objectDestroyed = true + } + + destroyed, err = l.VolumeGroup.Remove(ctx, l.CommandArgs.VgArgs.Remove) + if err != nil { + return false, err + } + if destroyed { + objectDestroyed = true + } + + for _, pv := range l.PhysicalVolumes { + destroyed, err := pv.Remove(ctx, l.CommandArgs.PvArgs.Remove) + if err != nil { + return false, err + } + if destroyed { + objectDestroyed = true + } + } + + return objectDestroyed, nil +} + +func (l *Lvm) Activate(ctx context.Context) (bool, error) { + // Make sure the that locking has been started on the VG. The node might have been rebooted + // since the VG was created + if len(l.CommandArgs.VgArgs.LockStart) > 0 { + _, err := l.VolumeGroup.LockStart(ctx, l.CommandArgs.VgArgs.LockStart) + if err != nil { + return false, err + } + } + + if len(l.CommandArgs.LvArgs.Activate) > 0 { + _, err := l.LogicalVolume.Activate(ctx, l.CommandArgs.LvArgs.Activate) + if err != nil { + return false, err + } + } + + return false, nil +} + +func (l *Lvm) Deactivate(ctx context.Context) (bool, error) { + + if len(l.CommandArgs.LvArgs.Deactivate) > 0 { + _, err := l.LogicalVolume.Deactivate(ctx, l.CommandArgs.LvArgs.Deactivate) + if err != nil { + return false, err + } + } + + if len(l.CommandArgs.VgArgs.LockStop) > 0 { + _, err := l.VolumeGroup.LockStop(ctx, l.CommandArgs.VgArgs.LockStop) + if err != nil { + return false, err + } + } + + return false, nil +} + +func (l *Lvm) GetDevice() string { + return fmt.Sprintf("/dev/mapper/%s-%s", strings.Replace(l.VolumeGroup.Name, "-", "--", -1), strings.Replace(l.LogicalVolume.Name, "-", "--", -1)) +} + +func (l *Lvm) CheckFormatted() bool { + output, err 
:= command.Run(fmt.Sprintf("wipefs --noheadings --output type %s", l.GetDevice())) + if err != nil { + return false + } + + if len(output) == 0 { + return false + } + + return true +} diff --git a/pkg/blockdevice/lvm/logical_volumes.go b/pkg/blockdevice/lvm/logical_volumes.go new file mode 100644 index 000000000..0156d4ee7 --- /dev/null +++ b/pkg/blockdevice/lvm/logical_volumes.go @@ -0,0 +1,161 @@ +/* + * Copyright 2023 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package lvm + +import ( + "context" + "fmt" + "strings" + + "github.com/NearNodeFlash/nnf-sos/pkg/command" + "github.com/NearNodeFlash/nnf-sos/pkg/var_handler" +) + +type LogicalVolume struct { + Name string + VolumeGroup *VolumeGroup +} + +func NewLogicalVolume(ctx context.Context, name string, vg *VolumeGroup) *LogicalVolume { + return &LogicalVolume{ + Name: name, + VolumeGroup: vg, + } +} + +func (lv *LogicalVolume) parseArgs(args string) (string, error) { + deviceNames := []string{} + for _, pv := range lv.VolumeGroup.PhysicalVolumes { + deviceNames = append(deviceNames, pv.Device) + } + + // Initialize the VarHandler substitution variables + varHandler := var_handler.NewVarHandler(map[string]string{ + "$DEVICE_NUM": fmt.Sprintf("%d", len(deviceNames)), + "$DEVICE_LIST": strings.Join(deviceNames, " "), + "$VG_NAME": lv.VolumeGroup.Name, + "$LV_NAME": lv.Name, + }) + + if err := varHandler.ListToVars("$DEVICE_LIST", "$DEVICE"); err != nil { + return "", fmt.Errorf("invalid internal device list: %w", err) + } + + return varHandler.ReplaceAll(args), nil +} + +func (lv *LogicalVolume) Create(ctx context.Context, rawArgs string) (bool, error) { + args, err := lv.parseArgs(rawArgs) + if err != nil { + return false, err + } + + existingLVs, err := lvsListVolumes(ctx) + if err != nil { + return false, err + } + + for _, existingLV := range existingLVs { + if existingLV.Name == lv.Name { + return false, nil + } + } + + if _, err := command.Run(fmt.Sprintf("lvcreate --yes %s", args)); err != nil { + return false, fmt.Errorf("could not create logical volume %s: %w", lv.Name, err) + } + + return true, nil +} + +func (lv *LogicalVolume) Remove(ctx context.Context, rawArgs string) (bool, error) { + args, err := lv.parseArgs(rawArgs) + if err != nil { + return false, err + } + + existingLVs, err := lvsListVolumes(ctx) + if err != nil { + return false, err + } + + for _, existingLV := range existingLVs { + if existingLV.Name == lv.Name { + if _, err := 
command.Run(fmt.Sprintf("lvremove --yes %s", args)); err != nil { + return false, fmt.Errorf("could not destroy logical volume %s: %w", lv.Name, err) + } + + return true, nil + } + } + + return true, nil +} + +func (lv *LogicalVolume) Change(ctx context.Context, rawArgs string) (bool, error) { + args, err := lv.parseArgs(rawArgs) + if err != nil { + return false, err + } + + if _, err := command.Run(fmt.Sprintf("lvchange %s", args)); err != nil { + return false, fmt.Errorf("could not change logical volume %s: %w", lv.Name, err) + } + + return true, nil +} + +func (lv *LogicalVolume) Activate(ctx context.Context, rawArgs string) (bool, error) { + existingLVs, err := lvsListVolumes(ctx) + if err != nil { + return false, err + } + + for _, existingLV := range existingLVs { + if existingLV.Name == lv.Name { + if existingLV.Attrs[4] == 'a' { + return false, nil + } + + return lv.Change(ctx, rawArgs) + } + } + + return false, nil +} + +func (lv *LogicalVolume) Deactivate(ctx context.Context, rawArgs string) (bool, error) { + existingLVs, err := lvsListVolumes(ctx) + if err != nil { + return false, err + } + + for _, existingLV := range existingLVs { + if existingLV.Name == lv.Name { + if existingLV.Attrs[4] != 'a' { + return false, nil + } + + return lv.Change(ctx, rawArgs) + } + } + + return false, nil +} diff --git a/pkg/blockdevice/lvm/lvs.go b/pkg/blockdevice/lvm/lvs.go new file mode 100644 index 000000000..45a2d182e --- /dev/null +++ b/pkg/blockdevice/lvm/lvs.go @@ -0,0 +1,64 @@ +/* + * Copyright 2023 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
+ * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package lvm + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/NearNodeFlash/nnf-sos/pkg/command" +) + +type lvsOutput struct { + Report []lvsReport `json:"report"` +} + +type lvsReport struct { + LV []lvsLogicalVolume `json:"lv"` +} + +type lvsLogicalVolume struct { + Name string `json:"lv_name"` + VGName string `json:"vg_name"` + Attrs string `json:"lv_attr"` + Size string `json:"lv_size"` +} + +func lvsListVolumes(ctx context.Context) ([]lvsLogicalVolume, error) { + output, err := command.Run("lvs --reportformat json") + if err != nil { + return nil, fmt.Errorf("could not list logical volumes: %w", err) + } + + lvsOutput := lvsOutput{} + + if err := json.Unmarshal([]byte(output), &lvsOutput); err != nil { + return nil, err + } + + // If there are multiple reports, combine all the logical volumes into a single list + logicalVolumes := []lvsLogicalVolume{} + for _, report := range lvsOutput.Report { + logicalVolumes = append(logicalVolumes, report.LV...) + } + + return logicalVolumes, nil +} diff --git a/pkg/blockdevice/lvm/physical_volumes.go b/pkg/blockdevice/lvm/physical_volumes.go new file mode 100644 index 000000000..5ad83fdcd --- /dev/null +++ b/pkg/blockdevice/lvm/physical_volumes.go @@ -0,0 +1,102 @@ +/* + * Copyright 2023 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
+ * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package lvm + +import ( + "context" + "fmt" + + "github.com/NearNodeFlash/nnf-sos/pkg/command" + "github.com/NearNodeFlash/nnf-sos/pkg/var_handler" +) + +type PhysicalVolume struct { + Device string +} + +func NewPhysicalVolume(ctx context.Context, device string) *PhysicalVolume { + return &PhysicalVolume{ + Device: device, + } +} + +func (pv *PhysicalVolume) parseArgs(args string, device string) (string, error) { + + // Initialize the VarHandler substitution variables + varHandler := var_handler.NewVarHandler(map[string]string{ + "$DEVICE": device, + }) + + return varHandler.ReplaceAll(args), nil +} + +func (pv *PhysicalVolume) Create(ctx context.Context, rawArgs string) (bool, error) { + args, err := pv.parseArgs(rawArgs, pv.Device) + if err != nil { + return false, err + } + + existingPVs, err := pvsListVolumes(ctx) + if err != nil { + return false, err + } + + for _, existingPV := range existingPVs { + if existingPV.Name == pv.Device { + return false, nil + } + } + + // No existing LVM PV found. 
Create one + if _, err := command.Run(fmt.Sprintf("pvcreate %s", args)); err != nil { + if err != nil { + return false, fmt.Errorf("could not create LVM physical volume: %w", err) + } + } + + return true, nil +} + +func (pv *PhysicalVolume) Remove(ctx context.Context, rawArgs string) (bool, error) { + args, err := pv.parseArgs(rawArgs, pv.Device) + if err != nil { + return false, err + } + + existingPVs, err := pvsListVolumes(ctx) + if err != nil { + return false, err + } + + for _, existingPV := range existingPVs { + if existingPV.Name == pv.Device { + // LVM PV found. Delete it + if _, err := command.Run(fmt.Sprintf("pvremove --yes %s", args)); err != nil { + if err != nil { + return false, fmt.Errorf("could not destroy LVM physical volume: %w", err) + } + } + + return true, nil + } + } + + return false, nil +} diff --git a/pkg/blockdevice/lvm/pvs.go b/pkg/blockdevice/lvm/pvs.go new file mode 100644 index 000000000..0412d8d08 --- /dev/null +++ b/pkg/blockdevice/lvm/pvs.go @@ -0,0 +1,64 @@ +/* + * Copyright 2023 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package lvm + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/NearNodeFlash/nnf-sos/pkg/command" +) + +type pvsOutput struct { + Report []pvsReport `json:"report"` +} + +type pvsReport struct { + LV []pvsPhysicalVolume `json:"pv"` +} + +type pvsPhysicalVolume struct { + Name string `json:"pv_name"` + VGName string `json:"vg_name"` + Attrs string `json:"pv_attr"` + Size string `json:"pv_size"` +} + +func pvsListVolumes(ctx context.Context) ([]pvsPhysicalVolume, error) { + output, err := command.Run("pvs --reportformat json") + if err != nil { + return nil, fmt.Errorf("could not list physical volumes: %w", err) + } + + pvsOutput := pvsOutput{} + + if err := json.Unmarshal([]byte(output), &pvsOutput); err != nil { + return nil, err + } + + // If there are multiple reports, combine all the physical volumes into a single list + physicalVolumes := []pvsPhysicalVolume{} + for _, report := range pvsOutput.Report { + physicalVolumes = append(physicalVolumes, report.LV...) + } + + return physicalVolumes, nil +} diff --git a/pkg/blockdevice/lvm/vgs.go b/pkg/blockdevice/lvm/vgs.go new file mode 100644 index 000000000..8bb6463e1 --- /dev/null +++ b/pkg/blockdevice/lvm/vgs.go @@ -0,0 +1,65 @@ +/* + * Copyright 2023 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package lvm + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/NearNodeFlash/nnf-sos/pkg/command" +) + +type vgsOutput struct { + Report []vgsReport `json:"report"` +} + +type vgsReport struct { + LV []vgsVolumeGroup `json:"vg"` +} + +type vgsVolumeGroup struct { + Name string `json:"vg_name"` + PVCount string `json:"pv_count"` + LVCount string `json:"lv_count"` + Attrs string `json:"vg_attr"` + Size string `json:"vg_size"` +} + +func vgsListVolumes(ctx context.Context) ([]vgsVolumeGroup, error) { + output, err := command.Run("vgs --reportformat json") + if err != nil { + return nil, fmt.Errorf("could not list volume groups: %w", err) + } + + vgsOutput := vgsOutput{} + + if err := json.Unmarshal([]byte(output), &vgsOutput); err != nil { + return nil, err + } + + // If there are multiple reports, combine all the volume groups into a single list + volumeGroups := []vgsVolumeGroup{} + for _, report := range vgsOutput.Report { + volumeGroups = append(volumeGroups, report.LV...) + } + + return volumeGroups, nil +} diff --git a/pkg/blockdevice/lvm/volume_groups.go b/pkg/blockdevice/lvm/volume_groups.go new file mode 100644 index 000000000..07cb7542d --- /dev/null +++ b/pkg/blockdevice/lvm/volume_groups.go @@ -0,0 +1,162 @@ +/* + * Copyright 2023 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package lvm + +import ( + "context" + "fmt" + "strings" + + "github.com/NearNodeFlash/nnf-sos/pkg/command" + "github.com/NearNodeFlash/nnf-sos/pkg/var_handler" +) + +type VolumeGroup struct { + Name string + PhysicalVolumes []*PhysicalVolume + Shared bool +} + +func NewVolumeGroup(ctx context.Context, name string, pvs []*PhysicalVolume) *VolumeGroup { + return &VolumeGroup{ + Name: name, + PhysicalVolumes: pvs, + } +} + +func (vg *VolumeGroup) Exists(ctx context.Context) (bool, error) { + existingVGs, err := vgsListVolumes(ctx) + if err != nil { + return false, err + } + + for _, existingVG := range existingVGs { + if existingVG.Name == vg.Name { + return true, nil + } + } + + return false, nil +} + +func (vg *VolumeGroup) parseArgs(args string) (string, error) { + deviceNames := []string{} + for _, pv := range vg.PhysicalVolumes { + deviceNames = append(deviceNames, pv.Device) + } + + // Initialize the VarHandler substitution variables + varHandler := var_handler.NewVarHandler(map[string]string{ + "$DEVICE_NUM": fmt.Sprintf("%d", len(deviceNames)), + "$DEVICE_LIST": strings.Join(deviceNames, " "), + "$VG_NAME": vg.Name, + }) + + if err := varHandler.ListToVars("$DEVICE_LIST", "$DEVICE"); err != nil { + return "", fmt.Errorf("invalid internal device list: %w", err) + } + + return varHandler.ReplaceAll(args), nil +} + +func (vg *VolumeGroup) Create(ctx context.Context, rawArgs string) (bool, error) { + args, err := vg.parseArgs(rawArgs) + if err != nil { + return false, err + } + + existingVGs, err := vgsListVolumes(ctx) + if err != nil { + return false, err + } + + for _, existingVG := range existingVGs { + if existingVG.Name == vg.Name { + return false, nil + } + } + + if _, err := command.Run(fmt.Sprintf("vgcreate %s", args)); err != nil { + return false, fmt.Errorf("could not create volume group: %w", err) + } + + return true, nil +} + +func (vg *VolumeGroup) Change(ctx context.Context, rawArgs string) (bool, error) { + args, err := vg.parseArgs(rawArgs) 
+ if err != nil { + return false, err + } + + if _, err := command.Run(fmt.Sprintf("vgchange %s", args)); err != nil { + return false, err + } + + return true, nil +} + +func (vg *VolumeGroup) LockStart(ctx context.Context, rawArgs string) (bool, error) { + return vg.Change(ctx, rawArgs) +} + +func (vg *VolumeGroup) LockStop(ctx context.Context, rawArgs string) (bool, error) { + exists, err := vg.Exists(ctx) + if err != nil { + return false, err + } + + if exists == false { + return false, nil + } + + lvs, err := lvsListVolumes(ctx) + for _, lv := range lvs { + if lv.VGName == vg.Name && lv.Attrs[4] == 'a' { + return false, nil + } + } + + return vg.Change(ctx, rawArgs) +} + +func (vg *VolumeGroup) Remove(ctx context.Context, rawArgs string) (bool, error) { + args, err := vg.parseArgs(rawArgs) + if err != nil { + return false, err + } + + existingVGs, err := vgsListVolumes(ctx) + if err != nil { + return false, err + } + + for _, existingVG := range existingVGs { + if existingVG.Name == vg.Name { + if _, err := command.Run(fmt.Sprintf("vgremove --yes %s", args)); err != nil { + return false, fmt.Errorf("could not destroy volume group: %w", err) + } + + return true, nil + } + } + + return false, nil +} diff --git a/pkg/blockdevice/mock.go b/pkg/blockdevice/mock.go new file mode 100644 index 000000000..16f3c6d6e --- /dev/null +++ b/pkg/blockdevice/mock.go @@ -0,0 +1,69 @@ +/* + * Copyright 2023 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
+ * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package blockdevice + +import ( + "context" + + "github.com/go-logr/logr" +) + +type MockBlockDevice struct { + Log logr.Logger +} + +// Check that Mock implements the BlockDevice interface +var _ BlockDevice = &MockBlockDevice{} + +func (m *MockBlockDevice) Create(ctx context.Context, complete bool) (bool, error) { + if complete { + return false, nil + } + + m.Log.Info("Created mock block device") + + return true, nil +} + +func (m *MockBlockDevice) Destroy(ctx context.Context) (bool, error) { + m.Log.Info("Destroyed mock block device") + + return true, nil +} + +func (m *MockBlockDevice) Activate(ctx context.Context) (bool, error) { + m.Log.Info("Dctivated mock block device") + + return true, nil +} + +func (m *MockBlockDevice) Deactivate(ctx context.Context) (bool, error) { + m.Log.Info("Deactivated mock block device") + + return true, nil +} + +func (m *MockBlockDevice) GetDevice() string { + return "/dev/mock" +} + +func (m *MockBlockDevice) CheckFormatted() bool { + return false +} diff --git a/pkg/blockdevice/nvme/nvme.go b/pkg/blockdevice/nvme/nvme.go new file mode 100644 index 000000000..bf34d68ab --- /dev/null +++ b/pkg/blockdevice/nvme/nvme.go @@ -0,0 +1,99 @@ +/* + * Copyright 2023 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
+ * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package nvme + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "regexp" + "strings" + + "github.com/NearNodeFlash/nnf-sos/pkg/command" +) + +type NvmeDevice struct { + DevicePath string + NSID uint32 + NQN string +} + +type nvmeListVerboseNamespaces struct { + Device string `json:"NameSpace"` + NSID uint32 `json:"NSID"` +} + +type nvmeListVerboseControllers struct { + Namespaces []nvmeListVerboseNamespaces `json:"Namespaces"` +} + +type nvmeListVerboseDevice struct { + SubsystemNQN string `json:"SubsystemNQN"` + Controllers []nvmeListVerboseControllers `json:"Controllers"` +} + +type nvmeListVerboseDevices struct { + Devices []nvmeListVerboseDevice `json:"Devices"` +} + +func NvmeListDevices() ([]NvmeDevice, error) { + devices := []NvmeDevice{} + + data, err := command.Run("nvme list -v --output-format=json") + if err != nil { + return nil, err + } + + foundDevices := nvmeListVerboseDevices{} + if err := json.Unmarshal([]byte(data), &foundDevices); err != nil { + return nil, err + } + + for _, device := range foundDevices.Devices { + for _, controller := range device.Controllers { + for _, namespace := range controller.Namespaces { + devices = append(devices, NvmeDevice{DevicePath: "/dev/" + namespace.Device, NSID: namespace.NSID, NQN: device.SubsystemNQN}) + } + } + } + + return devices, nil +} + +func NvmeRescanDevices() error { + devices, err := ioutil.ReadDir("/dev/") + if err != nil { + return fmt.Errorf("could not read /dev: %w", err) + } + + nvmeDevices := []string{} + nvmeRegex, _ := regexp.Compile("nvme[0-9]+$") + for _, 
device := range devices { + if match := nvmeRegex.MatchString(device.Name()); match { + nvmeDevices = append(nvmeDevices, "/dev/"+device.Name()) + } + } + + if _, err := command.Run("nvme ns-rescan " + strings.Join(nvmeDevices, " ")); err != nil { + return fmt.Errorf("could not rescan NVMe devices: %w", err) + } + + return nil +} diff --git a/pkg/blockdevice/zpool.go b/pkg/blockdevice/zpool.go new file mode 100644 index 000000000..4aa81ace9 --- /dev/null +++ b/pkg/blockdevice/zpool.go @@ -0,0 +1,124 @@ +/* + * Copyright 2023 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package blockdevice + +import ( + "context" + "fmt" + "strings" + + "github.com/NearNodeFlash/nnf-sos/pkg/command" + "github.com/NearNodeFlash/nnf-sos/pkg/var_handler" + "github.com/go-logr/logr" +) + +type ZpoolCommandArgs struct { + Create string + + Vars map[string]string +} + +type Zpool struct { + Log logr.Logger + CommandArgs ZpoolCommandArgs + + Devices []string + Name string + DataSet string +} + +// Check that Lvm implements the BlockDevice interface +var _ BlockDevice = &Zpool{} + +func (z *Zpool) parseArgs(args string) string { + m := map[string]string{ + "$DEVICE_NUM": fmt.Sprintf("%d", len(z.Devices)), + "$DEVICE_LIST": strings.Join(z.Devices, " "), + "$POOL_NAME": z.Name, + } + + for k, v := range z.CommandArgs.Vars { + m[k] = v + } + + // Initialize the VarHandler substitution variables + varHandler := var_handler.NewVarHandler(m) + return varHandler.ReplaceAll(args) +} + +func (z *Zpool) Create(ctx context.Context, complete bool) (bool, error) { + output, err := command.Run("zpool list -H") + if err != nil { + if err != nil { + return false, fmt.Errorf("could not list zpools") + } + } + + // Check whether the zpool already exists + for _, line := range strings.Split(output, "\n") { + fields := strings.Fields(line) + if len(fields) > 0 && fields[0] == z.Name { + if fields[9] == "ONLINE" { + return false, nil + } + return false, fmt.Errorf("zpool has unexpected health %s", fields[9]) + } + } + + if _, err := command.Run(fmt.Sprintf("zpool create %s", z.parseArgs(z.CommandArgs.Create))); err != nil { + if err != nil { + return false, fmt.Errorf("could not create file system: %w", err) + } + } + + return true, nil +} + +func (z *Zpool) Destroy(ctx context.Context) (bool, error) { + _, _ = command.Run(fmt.Sprintf("zpool destroy %s", z.Name)) + + return false, nil +} + +func (z *Zpool) Activate(ctx context.Context) (bool, error) { + return false, nil +} + +func (z *Zpool) Deactivate(ctx context.Context) (bool, error) { + return false, nil +} + 
+func (z *Zpool) GetDevice() string { + // The zpool device is just the name of the zpool + return fmt.Sprintf("%s/%s", z.Name, z.DataSet) +} + +func (z *Zpool) CheckFormatted() bool { + output, err := command.Run(fmt.Sprintf("zfs get -H lustre:fsname %s", z.GetDevice())) + if err != nil { + return false + } + + if len(output) == 0 { + return false + } + + return true +} diff --git a/pkg/command/cmd.go b/pkg/command/cmd.go new file mode 100644 index 000000000..6d8b8bf47 --- /dev/null +++ b/pkg/command/cmd.go @@ -0,0 +1,74 @@ +/* + * Copyright 2023 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package command + +import ( + "bytes" + "context" + "fmt" + "os" + "os/exec" + "strconv" + "time" + + "github.com/go-logr/logr" +) + +var log logr.Logger + +func RunWithTimeout(args string, timeout int) (string, error) { + + ctx := context.Background() + if timeout > 0 { + var cancel context.CancelFunc + + ctx, cancel = context.WithTimeout(context.Background(), time.Duration(timeout)*time.Second) + defer cancel() + } + + var stdout, stderr bytes.Buffer + shellCmd := exec.CommandContext(ctx, "bash", "-c", args) + shellCmd.Stdout = &stdout + shellCmd.Stderr = &stderr + + log.Info("Run", "command", args) + + err := shellCmd.Run() + if err != nil { + return stdout.String(), fmt.Errorf("command: %s - stderr: %s - stdout: %s - error: %w", args, stderr.String(), stdout.String(), err) + } + + // Command success, return stdout + return stdout.String(), nil +} + +func Run(args string) (string, error) { + timeoutString, found := os.LookupEnv("NNF_COMMAND_TIMEOUT_SECONDS") + if found { + timeout, err := strconv.Atoi(timeoutString) + if err != nil { + return "", err + } + + return RunWithTimeout(args, timeout) + } + + return RunWithTimeout(args, 0) +} diff --git a/pkg/filesystem/filesystem.go b/pkg/filesystem/filesystem.go new file mode 100644 index 000000000..71b800630 --- /dev/null +++ b/pkg/filesystem/filesystem.go @@ -0,0 +1,45 @@ +/* + * Copyright 2023 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// FileSystem defines the lifecycle of a file system that sits on top of a
// blockdevice.BlockDevice. Each method returns (true, nil) when it actually
// performed work and (false, nil) when there was nothing to do, which lets
// callers retry idempotently. The `complete` flag indicates the operation
// already finished on a previous attempt; implementations in this package
// return (false, nil) immediately when it is set.
type FileSystem interface {
	// Create the file system (e.g., mkfs)
	Create(ctx context.Context, complete bool) (bool, error)

	// Destroy the file system (e.g., wipefs)
	Destroy(ctx context.Context) (bool, error)

	// Activate the file system (e.g., mount Lustre target)
	Activate(ctx context.Context, complete bool) (bool, error)

	// Deactivate the file system (e.g., unmount Lustre target)
	Deactivate(ctx context.Context) (bool, error)

	// Mount the file system at `path`; `options` overrides the configured
	// default mount options when non-empty
	Mount(ctx context.Context, path string, options string, complete bool) (bool, error)

	// Unmount the file system mounted at `path`
	Unmount(ctx context.Context, path string) (bool, error)

	// Set the UID and GID for the file system
	SetPermissions(ctx context.Context, uid uint32, gid uint32, complete bool) (bool, error)
}
+ */ + +package filesystem + +import ( + "context" + "fmt" + "os" + + "github.com/NearNodeFlash/nnf-sos/pkg/blockdevice" + "github.com/go-logr/logr" +) + +type KindFileSystem struct { + Log logr.Logger + Path string + + BlockDevice blockdevice.BlockDevice +} + +// Check that LustreFileSystem implements the FileSystem interface +var _ FileSystem = &MockFileSystem{} + +func (m *KindFileSystem) Create(ctx context.Context, complete bool) (bool, error) { + if complete == true { + return false, nil + } + + if err := os.MkdirAll(m.Path, 0755); err != nil { + return false, fmt.Errorf("could not create mount directory %s: %w", m.Path, err) + } + + m.Log.Info("Created mock file system", "path", m.Path) + return true, nil +} + +func (m *KindFileSystem) Destroy(ctx context.Context) (bool, error) { + // Remove the directory. If it fails don't worry about it. + _ = os.RemoveAll(m.Path) + + m.Log.Info("Destroyed mock file system") + return true, nil +} + +func (m *KindFileSystem) Activate(ctx context.Context, complete bool) (bool, error) { + if complete == true { + return false, nil + } + + m.Log.Info("Activated mock file system") + return true, nil +} + +func (m *KindFileSystem) Deactivate(ctx context.Context) (bool, error) { + m.Log.Info("Deactivated mock file system") + return true, nil +} + +func (m *KindFileSystem) Mount(ctx context.Context, path string, options string, complete bool) (bool, error) { + if complete == true { + return false, nil + } + + if err := os.Symlink(m.Path, path); err != nil { + return false, fmt.Errorf("could not create symlink mount %s: %w", path, err) + } + + m.Log.Info("Mounted mock file system", "filesystem", m.Path, "mount", path) + return true, nil +} + +func (m *KindFileSystem) Unmount(ctx context.Context, path string) (bool, error) { + // Remove the directory. If it fails don't worry about it. 
+ _ = os.Remove(path) + + m.Log.Info("Unmounted mock file system") + return true, nil +} + +func (m *KindFileSystem) SetPermissions(ctx context.Context, uid uint32, gid uint32, complete bool) (bool, error) { + m.Log.Info("Set mock file system permissions") + + return false, nil +} diff --git a/pkg/filesystem/lustre.go b/pkg/filesystem/lustre.go new file mode 100644 index 000000000..72d7364f9 --- /dev/null +++ b/pkg/filesystem/lustre.go @@ -0,0 +1,268 @@ +/* + * Copyright 2023 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package filesystem + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "github.com/NearNodeFlash/nnf-sos/pkg/blockdevice" + "github.com/NearNodeFlash/nnf-sos/pkg/command" + "github.com/NearNodeFlash/nnf-sos/pkg/var_handler" + "github.com/go-logr/logr" + + mount "k8s.io/mount-utils" +) + +type LustreFileSystemCommandArgs struct { + Mkfs string + MountTarget string + Mount string + + Vars map[string]string +} + +type LustreFileSystem struct { + Log logr.Logger + CommandArgs LustreFileSystemCommandArgs + + Name string + TargetType string + MgsAddress string + Index int + BackFs string + + BlockDevice blockdevice.BlockDevice +} + +// Check that LustreFileSystem implements the FileSystem interface +var _ FileSystem = &LustreFileSystem{} + +func (l *LustreFileSystem) parseArgs(args string) string { + m := map[string]string{ + "$DEVICE": l.BlockDevice.GetDevice(), + "$ZVOL_NAME": l.BlockDevice.GetDevice(), + "$MGS_NID": l.MgsAddress, + "$INDEX": fmt.Sprintf("%d", l.Index), + "$FS_NAME": l.Name, + "$BACKFS": l.BackFs, + } + + for k, v := range l.CommandArgs.Vars { + m[k] = v + } + + // Initialize the VarHandler substitution variables + varHandler := var_handler.NewVarHandler(m) + return varHandler.ReplaceAll(args) +} + +func (l *LustreFileSystem) Create(ctx context.Context, complete bool) (bool, error) { + if complete == true { + return false, nil + } + + // If the device is already formatted, don't run the mkfs again + if l.BlockDevice.CheckFormatted() { + return false, nil + } + + if _, err := command.Run(fmt.Sprintf("mkfs -t lustre %s", l.parseArgs(l.CommandArgs.Mkfs))); err != nil { + if err != nil { + return false, fmt.Errorf("could not create file system: %w", err) + } + } + return true, nil +} + +func (l *LustreFileSystem) Destroy(ctx context.Context) (bool, error) { + + return false, nil +} + +func (l *LustreFileSystem) Activate(ctx context.Context, complete bool) (bool, error) { + mounter := mount.New("") + mounts, err := mounter.List() + if err 
!= nil { + return false, err + } + + path := filepath.Clean(fmt.Sprintf("/mnt/nnf/lustre/%s.%s.%d", l.Name, l.TargetType, l.Index)) + for _, m := range mounts { + if m.Path != path { + continue + } + + // Found an existing mount at this path. Check if it's the mount we expect + if m.Device != l.BlockDevice.GetDevice() || m.Type != "lustre" { + return false, fmt.Errorf("unexpected mount at path %s. Device %s type %s", path, m.Device, m.Type) + } + + // The Lustre target is already mounted. Nothing left to do + return false, nil + } + + // Create the mount directory + if err := os.MkdirAll(path, 0755); err != nil { + return false, fmt.Errorf("could not create mount directory %s: %w", path, err) + } + + if _, err := l.BlockDevice.Activate(ctx); err != nil { + return false, fmt.Errorf("could not activate block device for mounting %s: %w", path, err) + } + + // Run the mount command + mountCmd := fmt.Sprintf("mount -t lustre %s %s", l.BlockDevice.GetDevice(), path) + if l.CommandArgs.MountTarget != "" { + mountCmd = mountCmd + " -o " + l.parseArgs(l.CommandArgs.MountTarget) + } + + if _, err := command.Run(mountCmd); err != nil { + if _, err := l.BlockDevice.Deactivate(ctx); err != nil { + return false, fmt.Errorf("could not deactivate block device after failed mount %s: %w", path, err) + } + + return false, fmt.Errorf("could not mount file system %s: %w", path, err) + } + + return true, nil +} + +func (l *LustreFileSystem) Deactivate(ctx context.Context) (bool, error) { + mounter := mount.New("") + mounts, err := mounter.List() + if err != nil { + return false, err + } + + path := filepath.Clean(fmt.Sprintf("/mnt/nnf/lustre/%s.%s.%d", l.Name, l.TargetType, l.Index)) + for _, m := range mounts { + if m.Path != path { + continue + } + + // Found an existing mount at this path. Check if it's the mount we expect + if m.Device != l.BlockDevice.GetDevice() || m.Type != "lustre" { + return false, fmt.Errorf("unexpected mount at path %s. 
Device %s type %s", path, m.Device, m.Type) + } + + if _, err := command.Run(fmt.Sprintf("umount %s", path)); err != nil { + return false, fmt.Errorf("could not unmount file system %s: %w", path, err) + } + + // Remove the directory. If it fails don't worry about it. + _ = os.Remove(path) + + if _, err := l.BlockDevice.Deactivate(ctx); err != nil { + return false, fmt.Errorf("could not deactivate block device after unmount %s: %w", path, err) + } + + return true, nil + } + + // Try to deactivate the block device in case the deactivate failed after the unmount above + if _, err := l.BlockDevice.Deactivate(ctx); err != nil { + return false, fmt.Errorf("could not deactivate block device after unmount %s: %w", path, err) + } + + // file system already unmounted + return false, nil +} + +func (l *LustreFileSystem) Mount(ctx context.Context, path string, options string, complete bool) (bool, error) { + path = filepath.Clean(path) + mounter := mount.New("") + mounts, err := mounter.List() + if err != nil { + return false, err + } + + for _, m := range mounts { + if m.Path != path { + continue + } + + // Found an existing mount at this path. Check if it's the mount we expect + if m.Type != "lustre" { + return false, fmt.Errorf("unexpected mount at path %s. Device %s type %s", path, m.Device, m.Type) + } + + // The file system is already mounted. 
Nothing left to do + return false, nil + } + + // Create the mount directory + if err := os.MkdirAll(path, 0755); err != nil { + return false, fmt.Errorf("could not create mount directory %s: %w", path, err) + } + + // Run the mount command + if len(options) == 0 { + options = l.CommandArgs.Mount + } + mountCmd := fmt.Sprintf("mount -t lustre %s:/%s %s", l.MgsAddress, l.Name, path) + if len(options) > 0 { + mountCmd = mountCmd + " -o " + l.parseArgs(options) + } + + if _, err := command.Run(mountCmd); err != nil { + return false, fmt.Errorf("could not mount file system %s: %w", path, err) + } + + return true, nil +} + +func (l *LustreFileSystem) Unmount(ctx context.Context, path string) (bool, error) { + path = filepath.Clean(path) + mounter := mount.New("") + mounts, err := mounter.List() + if err != nil { + return false, err + } + + for _, m := range mounts { + if m.Path != path { + continue + } + + // Found an existing mount at this path. Check if it's the mount we expect + if m.Device != fmt.Sprintf("%s:/%s", l.MgsAddress, l.Name) || m.Type != "lustre" { + return false, fmt.Errorf("unexpected mount at path %s. Device %s type %s", path, m.Device, m.Type) + } + + if _, err := command.Run(fmt.Sprintf("umount %s", path)); err != nil { + return false, fmt.Errorf("could not unmount file system %s: %w", path, err) + } + + // Remove the directory. If it fails don't worry about it. + _ = os.Remove(path) + + return true, nil + } + + // file system already unmounted + return false, nil +} + +func (l *LustreFileSystem) SetPermissions(ctx context.Context, uid uint32, gid uint32, complete bool) (bool, error) { + return false, nil +} diff --git a/pkg/filesystem/mock.go b/pkg/filesystem/mock.go new file mode 100644 index 000000000..e63d7ac1f --- /dev/null +++ b/pkg/filesystem/mock.go @@ -0,0 +1,88 @@ +/* + * Copyright 2023 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. 
+ * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package filesystem + +import ( + "context" + + "github.com/NearNodeFlash/nnf-sos/pkg/blockdevice" + "github.com/go-logr/logr" +) + +type MockFileSystem struct { + Log logr.Logger + Path string + + BlockDevice blockdevice.BlockDevice +} + +// Check that LustreFileSystem implements the FileSystem interface +var _ FileSystem = &MockFileSystem{} + +func (m *MockFileSystem) Create(ctx context.Context, complete bool) (bool, error) { + if complete == true { + return false, nil + } + + m.Log.Info("Created mock file system", "path", m.Path) + return true, nil +} + +func (m *MockFileSystem) Destroy(ctx context.Context) (bool, error) { + m.Log.Info("Destroyed mock file system") + + return true, nil +} + +func (m *MockFileSystem) Activate(ctx context.Context, complete bool) (bool, error) { + if complete == true { + return false, nil + } + + m.Log.Info("Activated mock file system") + return true, nil +} + +func (m *MockFileSystem) Deactivate(ctx context.Context) (bool, error) { + m.Log.Info("Deactivated mock file system") + + return true, nil +} + +func (m *MockFileSystem) Mount(ctx context.Context, path string, options string, complete bool) (bool, error) { + if complete == true { + return false, nil + } + + m.Log.Info("Mounted mock file system", "filesystem", m.Path, "mount", path) + return true, nil +} + +func (m *MockFileSystem) Unmount(ctx context.Context, path string) 
(bool, error) { + m.Log.Info("Unmounted mock file system") + + return true, nil +} + +func (m *MockFileSystem) SetPermissions(ctx context.Context, uid uint32, gid uint32, complete bool) (bool, error) { + m.Log.Info("Set mock file system permissions") + + return false, nil +} diff --git a/pkg/filesystem/simple.go b/pkg/filesystem/simple.go new file mode 100644 index 000000000..361f8294a --- /dev/null +++ b/pkg/filesystem/simple.go @@ -0,0 +1,251 @@ +/* + * Copyright 2023 Hewlett Packard Enterprise Development LP + * Other additional copyright holders may be indicated within. + * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package filesystem + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "github.com/NearNodeFlash/nnf-sos/pkg/blockdevice" + "github.com/NearNodeFlash/nnf-sos/pkg/command" + "github.com/NearNodeFlash/nnf-sos/pkg/var_handler" + "github.com/go-logr/logr" + + mount "k8s.io/mount-utils" +) + +type SimpleFileSystemCommandArgs struct { + Mkfs string + Mount string + + Vars map[string]string +} + +type SimpleFileSystem struct { + Log logr.Logger + CommandArgs SimpleFileSystemCommandArgs + + Type string + MountTarget string + TempDir string + + BlockDevice blockdevice.BlockDevice +} + +// Check that SimpleFileSystem implements the FileSystem interface +var _ FileSystem = &SimpleFileSystem{} + +func (f *SimpleFileSystem) parseArgs(args string) string { + m := map[string]string{ + "$DEVICE": f.BlockDevice.GetDevice(), + } + + for k, v := range f.CommandArgs.Vars { + m[k] = v + } + + // Initialize the VarHandler substitution variables + varHandler := var_handler.NewVarHandler(m) + return varHandler.ReplaceAll(args) +} + +func (f *SimpleFileSystem) Create(ctx context.Context, complete bool) (bool, error) { + if complete == true { + return false, nil + } + + if f.Type == "none" { + return false, nil + } + + if _, err := f.BlockDevice.Activate(ctx); err != nil { + return false, fmt.Errorf("could not activate block device for mounting: %w", err) + } + + // If the device is already formatted, don't run the mkfs again + if f.BlockDevice.CheckFormatted() { + if _, err := f.BlockDevice.Deactivate(ctx); err != nil { + return false, fmt.Errorf("could not deactivate block device after format shows completed: %w", err) + } + + return false, nil + } + + if _, err := command.Run(fmt.Sprintf("mkfs -t %s %s", f.Type, f.parseArgs(f.CommandArgs.Mkfs))); err != nil { + if err != nil { + return false, fmt.Errorf("could not create file system: %w", err) + } + } + + if _, err := f.BlockDevice.Deactivate(ctx); err != nil { + return false, fmt.Errorf("could not deactivate block 
device after mkfs: %w", err) + } + + return true, nil +} + +func (f *SimpleFileSystem) Destroy(ctx context.Context) (bool, error) { + return false, nil +} + +func (f *SimpleFileSystem) Activate(ctx context.Context, complete bool) (bool, error) { + return false, nil +} + +func (f *SimpleFileSystem) Deactivate(ctx context.Context) (bool, error) { + return false, nil +} + +func (f *SimpleFileSystem) Mount(ctx context.Context, path string, options string, complete bool) (bool, error) { + path = filepath.Clean(path) + mounter := mount.New("") + mounts, err := mounter.List() + if err != nil { + return false, err + } + + for _, m := range mounts { + if m.Path != path { + continue + } + + if f.Type == "none" { + return false, nil + } + + // Found an existing mount at this path. Check if it's the mount we expect + if m.Device != f.BlockDevice.GetDevice() || m.Type != f.Type { + return false, fmt.Errorf("unexpected mount at path %s. Device %s type %s", path, m.Device, m.Type) + } + + // The file system is already mounted. 
Nothing left to do + return false, nil + } + + // Create the mount file or directory + switch f.MountTarget { + case "directory": + if err := os.MkdirAll(path, 0755); err != nil { + return false, fmt.Errorf("could not create mount directory %s: %w", path, err) + } + case "file": + // Create the parent directory and then the file + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return false, fmt.Errorf("could not create parent mount directory %s: %w", filepath.Dir(path), err) + } + + if err := os.WriteFile(path, []byte(""), 0644); err != nil { + return false, fmt.Errorf("could not create mount file %s: %w", path, err) + } + } + + if _, err := f.BlockDevice.Activate(ctx); err != nil { + return false, fmt.Errorf("could not activate block device for mounting %s: %w", path, err) + } + + // Run the mount command + if len(options) == 0 { + options = f.CommandArgs.Mount + } + mountCmd := fmt.Sprintf("mount -t %s %s %s", f.Type, f.BlockDevice.GetDevice(), path) + if len(options) > 0 { + mountCmd = mountCmd + " -o " + f.parseArgs(options) + } + + if _, err := command.Run(mountCmd); err != nil { + if _, err := f.BlockDevice.Deactivate(ctx); err != nil { + return false, fmt.Errorf("could not deactivate block device after failed mount %s: %w", path, err) + } + + return false, fmt.Errorf("could not mount file system %s: %w", path, err) + } + + return true, nil +} + +func (f *SimpleFileSystem) Unmount(ctx context.Context, path string) (bool, error) { + path = filepath.Clean(path) + mounter := mount.New("") + mounts, err := mounter.List() + if err != nil { + return false, err + } + + for _, m := range mounts { + if m.Path != path { + continue + } + + // Found an existing mount at this path. If it's not a bind mount, check if it's the mount device we expect + if f.Type != "none" && (m.Device != f.BlockDevice.GetDevice() || m.Type != f.Type) { + return false, fmt.Errorf("unexpected mount at path %s. 
Device %s type %s", path, m.Device, m.Type) + } + + if _, err := command.Run(fmt.Sprintf("umount %s", path)); err != nil { + return false, fmt.Errorf("could not unmount file system %s: %w", path, err) + } + + // Remove the file/directory. If it fails don't worry about it. + _ = os.Remove(path) + + if _, err := f.BlockDevice.Deactivate(ctx); err != nil { + return false, fmt.Errorf("could not deactivate block device after unmount %s: %w", path, err) + } + + return true, nil + } + + // Try to deactivate the block device in case the deactivate failed after the unmount above + if _, err := f.BlockDevice.Deactivate(ctx); err != nil { + return false, fmt.Errorf("could not deactivate block device after unmount %s: %w", path, err) + } + + // file system already unmounted + return false, nil +} + +func (f *SimpleFileSystem) SetPermissions(ctx context.Context, userID uint32, groupID uint32, complete bool) (bool, error) { + if complete { + return false, nil + } + + if f.Type == "none" { + return false, nil + } + + if _, err := f.Mount(ctx, f.TempDir, "", false); err != nil { + return false, fmt.Errorf("could not mount temp dir '%s' to set permissions: %w", f.TempDir, err) + } + + if err := os.Chown(f.TempDir, int(userID), int(groupID)); err != nil { + if _, unmountErr := f.Unmount(ctx, f.TempDir); unmountErr != nil { + return false, fmt.Errorf("could not unmount after setting owner permissions failed '%s': %w", f.TempDir, unmountErr) + } + return false, fmt.Errorf("could not set owner permissions '%s': %w", f.TempDir, err) + } + + if _, err := f.Unmount(ctx, f.TempDir); err != nil { + return false, fmt.Errorf("could not unmount after setting owner permissions '%s': %w", f.TempDir, err) + } + + return false, nil +} diff --git a/pkg/var_handler/var_handler.go b/pkg/var_handler/var_handler.go new file mode 100644 index 000000000..05c7f231c --- /dev/null +++ b/pkg/var_handler/var_handler.go @@ -0,0 +1,60 @@ +/* + * Copyright 2022 Hewlett Packard Enterprise Development LP + * Other 
// VarHandler performs simple $VARIABLE string substitution over a map of
// variable names to values.
type VarHandler struct {
	VarMap map[string]string
}

// NewVarHandler wraps vars (not copied; later changes to the map are seen
// by this handler) in a VarHandler.
func NewVarHandler(vars map[string]string) *VarHandler {
	v := &VarHandler{}
	v.VarMap = vars
	return v
}

// AddVar registers (or overwrites) a substitution variable.
func (v *VarHandler) AddVar(name string, value string) {
	v.VarMap[name] = value
}

// ListToVars splits the value of one of its variables, and creates a new
// indexed variable for each of the items in the split.
func (v *VarHandler) ListToVars(listVarName, newVarPrefix string) error {
	theList, ok := v.VarMap[listVarName]
	if !ok {
		// Error strings are lowercase per Go convention.
		return fmt.Errorf("unable to find the variable named %s", listVarName)
	}

	for i, val := range strings.Split(theList, " ") {
		v.VarMap[fmt.Sprintf("%s%d", newVarPrefix, i+1)] = val
	}
	return nil
}

// ReplaceAll substitutes every variable in s. Variables are applied
// longest-name-first: map iteration order is random, so when one name is a
// prefix of another (e.g. $DEVICE and $DEVICE_LIST) the original could
// nondeterministically corrupt the longer variable. Sorting fixes that and
// makes the output deterministic. Ties sort lexically for stability.
func (v *VarHandler) ReplaceAll(s string) string {
	keys := make([]string, 0, len(v.VarMap))
	for key := range v.VarMap {
		keys = append(keys, key)
	}

	sort.Slice(keys, func(i, j int) bool {
		if len(keys[i]) != len(keys[j]) {
			return len(keys[i]) > len(keys[j])
		}
		return keys[i] < keys[j]
	})

	for _, key := range keys {
		s = strings.ReplaceAll(s, key, v.VarMap[key])
	}
	return s
}
+ * + * The entirety of this work is licensed under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package var_handler + +import ( + "testing" +) + +func TestVarHandler(t *testing.T) { + varMap := map[string]string{ + "$BIGDOG": "Jules", + "$GREYDOG": "Henri", + } + + v := NewVarHandler(varMap) + // Add a late-arriving variable. + v.VarMap["$FAVTOY"] = "rope" + + in1 := "The big dog is $BIGDOG, the little dog is $GREYDOG. $BIGDOG and $GREYDOG are best friends and their favorite toy is the $FAVTOY." + want1 := "The big dog is Jules, the little dog is Henri. Jules and Henri are best friends and their favorite toy is the rope." + out1 := v.ReplaceAll(in1) + if out1 != want1 { + t.Errorf("Did not get the desired result. Got (%s)", out1) + } + + // Change a variable. + v.VarMap["$FAVTOY"] = "ball" + in2 := "$BIGDOG likes the $FAVTOY." + want2 := "Jules likes the ball." + out2 := v.ReplaceAll(in2) + if out2 != want2 { + t.Errorf("Did not get desired result. Got (%s)", out2) + } + + // Delete a variable. + delete(v.VarMap, "$FAVTOY") + in3 := "$GREYDOG's favorite toy was the $FAVTOY." + want3 := "Henri's favorite toy was the $FAVTOY." + out3 := v.ReplaceAll(in3) + if out3 != want3 { + t.Errorf("Did not get desired result. Got (%s)", out3) + } + + // Add a list to turn into variables. 
+ v.VarMap["$DEVICE_LIST"] = "/dev/nvme0n1 /dev/nvme1n1 /dev/nvme0n2 /dev/nvme1n2" + if err := v.ListToVars("$DEVICE_LIST", "$DEVICE"); err != nil { + t.Errorf("Did not split list: %v", err) + } else { + in4 := "zpool mirror $DEVICE1 $DEVICE2 mirror $DEVICE3 $DEVICE4" + want4 := "zpool mirror /dev/nvme0n1 /dev/nvme1n1 mirror /dev/nvme0n2 /dev/nvme1n2" + out4 := v.ReplaceAll(in4) + if out4 != want4 { + t.Errorf("Did not get desired result. Got (%s)", out4) + } + } +} diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/owner_labels.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/owner_labels.go index 7e1d52f9c..de161928c 100644 --- a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/owner_labels.go +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/owner_labels.go @@ -86,6 +86,7 @@ func AddWorkflowLabels(child metav1.Object, workflow *Workflow) { labels[WorkflowNameLabel] = workflow.Name labels[WorkflowNamespaceLabel] = workflow.Namespace + labels[WorkflowUidLabel] = string(workflow.GetUID()) child.SetLabels(labels) } diff --git a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/workflow_types.go b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/workflow_types.go index 9d35b43b8..5a895f006 100644 --- a/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/workflow_types.go +++ b/vendor/github.com/DataWorkflowServices/dws/api/v1alpha2/workflow_types.go @@ -35,6 +35,9 @@ const ( // WorkflowNamespaceLabel is defined for resources that relate to the namespace of a DWS Workflow WorkflowNamespaceLabel = "dataworkflowservices.github.io/workflow.namespace" + + // WorkflowNamespaceLabel is defined for resources that relate to the namespace of a DWS Workflow + WorkflowUidLabel = "dataworkflowservices.github.io/workflow.uid" ) // WorkflowState is the enumeration of the state of the workflow diff --git a/vendor/github.com/takama/daemon/.gitignore b/vendor/github.com/takama/daemon/.gitignore new file mode 100644 
index 000000000..e43b0f988 --- /dev/null +++ b/vendor/github.com/takama/daemon/.gitignore @@ -0,0 +1 @@ +.DS_Store diff --git a/vendor/github.com/takama/daemon/LICENSE b/vendor/github.com/takama/daemon/LICENSE new file mode 100644 index 000000000..3c1a1697e --- /dev/null +++ b/vendor/github.com/takama/daemon/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2020 The Go Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/takama/daemon/README.md b/vendor/github.com/takama/daemon/README.md new file mode 100644 index 000000000..27dd6506f --- /dev/null +++ b/vendor/github.com/takama/daemon/README.md @@ -0,0 +1,250 @@ +# Go Daemon + +A daemon package for use with Go (golang) services + +[![GoDoc](https://godoc.org/github.com/takama/daemon?status.svg)](https://godoc.org/github.com/takama/daemon) + +## Examples + +### Simplest example (just install self as daemon) + +```go +package main + +import ( + "fmt" + "log" + + "github.com/takama/daemon" +) + +func main() { + service, err := daemon.New("name", "description", daemon.SystemDaemon) + if err != nil { + log.Fatal("Error: ", err) + } + status, err := service.Install() + if err != nil { + log.Fatal(status, "\nError: ", err) + } + fmt.Println(status) +} +``` + +### Real example + +```go +// Example of a daemon with echo service +package main + +import ( + "fmt" + "log" + "net" + "os" + "os/signal" + "syscall" + + "github.com/takama/daemon" +) + +const ( + + // name of the service + name = "myservice" + description = "My Echo Service" + + // port which daemon should be listen + port = ":9977" +) + +// dependencies that are NOT required by the service, but might be used +var dependencies = []string{"dummy.service"} + +var stdlog, errlog *log.Logger + +// Service has embedded daemon +type Service struct { + daemon.Daemon +} + +// Manage by daemon commands or run the daemon +func (service *Service) Manage() (string, error) { + + usage := "Usage: myservice install | remove | start | stop | status" + + // if received any kind of command, do it + if len(os.Args) > 1 { + command := os.Args[1] + switch command { + case "install": + return service.Install() + case "remove": + return service.Remove() + case "start": + return service.Start() + case "stop": + return service.Stop() + case "status": + return service.Status() + default: + return usage, nil + } + } + + // Do something, call your 
goroutines, etc + + // Set up channel on which to send signal notifications. + // We must use a buffered channel or risk missing the signal + // if we're not ready to receive when the signal is sent. + interrupt := make(chan os.Signal, 1) + signal.Notify(interrupt, os.Interrupt, os.Kill, syscall.SIGTERM) + + // Set up listener for defined host and port + listener, err := net.Listen("tcp", port) + if err != nil { + return "Possibly was a problem with the port binding", err + } + + // set up channel on which to send accepted connections + listen := make(chan net.Conn, 100) + go acceptConnection(listener, listen) + + // loop work cycle with accept connections or interrupt + // by system signal + for { + select { + case conn := <-listen: + go handleClient(conn) + case killSignal := <-interrupt: + stdlog.Println("Got signal:", killSignal) + stdlog.Println("Stoping listening on ", listener.Addr()) + listener.Close() + if killSignal == os.Interrupt { + return "Daemon was interruped by system signal", nil + } + return "Daemon was killed", nil + } + } + + // never happen, but need to complete code + return usage, nil +} + +// Accept a client connection and collect it in a channel +func acceptConnection(listener net.Listener, listen chan<- net.Conn) { + for { + conn, err := listener.Accept() + if err != nil { + continue + } + listen <- conn + } +} + +func handleClient(client net.Conn) { + for { + buf := make([]byte, 4096) + numbytes, err := client.Read(buf) + if numbytes == 0 || err != nil { + return + } + client.Write(buf[:numbytes]) + } +} + +func init() { + stdlog = log.New(os.Stdout, "", log.Ldate|log.Ltime) + errlog = log.New(os.Stderr, "", log.Ldate|log.Ltime) +} + +func main() { + srv, err := daemon.New(name, description, daemon.SystemDaemon, dependencies...) 
+ if err != nil { + errlog.Println("Error: ", err) + os.Exit(1) + } + service := &Service{srv} + status, err := service.Manage() + if err != nil { + errlog.Println(status, "\nError: ", err) + os.Exit(1) + } + fmt.Println(status) +} +``` + +### Service config file + +Optionally, service config file can be retrieved or updated by calling +`GetTemplate() string` and `SetTemplate(string)` methods(except MS +Windows). Template will be a default Go Template(`"text/template"`). + +If `SetTemplate` is not called, default template content will be used +while creating service. + +| Variable | Description | +| ------------ | -------------------------------- | +| Description | Description for service | +| Dependencies | Service dependencies | +| Name | Service name | +| Path | Path of service executable | +| Args | Arguments for service executable | + +#### Example template(for linux systemv) + +```ini +[Unit] +Description={{.Description}} +Requires={{.Dependencies}} +After={{.Dependencies}} + +[Service] +PIDFile=/var/run/{{.Name}}.pid +ExecStartPre=/bin/rm -f /var/run/{{.Name}}.pid +ExecStart={{.Path}} {{.Args}} +Restart=on-failure + +[Install] +WantedBy=multi-user.target +``` + +### Cron example + +See `examples/cron/cron_job.go` + +## Contributors (unsorted) + +- [Sheile](https://github.com/Sheile) +- [Nguyen Trung Loi](https://github.com/loint) +- [Donny Prasetyobudi](https://github.com/donnpebe) +- [Mark Berner](https://github.com/mark2b) +- [Fatih Kaya](https://github.com/fatihky) +- [Jannick Fahlbusch](https://github.com/jannickfahlbusch) +- [TobyZXJ](https://github.com/tobyzxj) +- [Pichu Chen](https://github.com/PichuChen) +- [Eric Halpern](https://github.com/ehalpern) +- [Yota](https://github.com/nus) +- [Erkan Durmus](https://github.com/derkan) +- [maxxant](https://github.com/maxxant) +- [1for](https://github.com/1for) +- [okamura](https://github.com/sidepelican) +- [0X8C - Demired](https://github.com/Demired) +- [Maximus](https://github.com/maximus12793) +- 
[AlgorathDev](https://github.com/AlgorathDev) +- [Alexis Camilleri](https://github.com/krysennn) +- [neverland4u](https://github.com/neverland4u) +- [Rustam](https://github.com/rusq) +- [King'ori Maina](https://github.com/itskingori) + +All the contributors are welcome. If you would like to be the contributor please accept some rules. + +- The pull requests will be accepted only in `develop` branch +- All modifications or additions should be tested +- Sorry, We will not accept code with any dependency, only standard library + +Thank you for your understanding! + +## License + +[MIT Public License](https://github.com/takama/daemon/blob/master/LICENSE) diff --git a/vendor/github.com/takama/daemon/daemon.go b/vendor/github.com/takama/daemon/daemon.go new file mode 100644 index 000000000..e5b0fa20a --- /dev/null +++ b/vendor/github.com/takama/daemon/daemon.go @@ -0,0 +1,262 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by +// license that can be found in the LICENSE file. + +/* +Package daemon v1.0.0 for use with Go (golang) services. + +Package daemon provides primitives for daemonization of golang services. In the +current implementation the only supported operating systems are macOS, FreeBSD, +Linux and Windows. Also to note, for global daemons one must have root rights to +install or remove the service. The only exception is macOS where there is an +implementation of a user daemon that can installed or removed by the current +user. 
+ +Example: + + // Example of a daemon with echo service + package main + + import ( + "fmt" + "log" + "net" + "os" + "os/signal" + "syscall" + + "github.com/takama/daemon" + ) + + const ( + + // name of the service + name = "myservice" + description = "My Echo Service" + + // port which daemon should be listen + port = ":9977" + ) + + // dependencies that are NOT required by the service, but might be used + var dependencies = []string{"dummy.service"} + + var stdlog, errlog *log.Logger + + // Service has embedded daemon + type Service struct { + daemon.Daemon + } + + // Manage by daemon commands or run the daemon + func (service *Service) Manage() (string, error) { + + usage := "Usage: myservice install | remove | start | stop | status" + + // if received any kind of command, do it + if len(os.Args) > 1 { + command := os.Args[1] + switch command { + case "install": + return service.Install() + case "remove": + return service.Remove() + case "start": + return service.Start() + case "stop": + return service.Stop() + case "status": + return service.Status() + default: + return usage, nil + } + } + + // Do something, call your goroutines, etc + + // Set up channel on which to send signal notifications. + // We must use a buffered channel or risk missing the signal + // if we're not ready to receive when the signal is sent. 
+ interrupt := make(chan os.Signal, 1) + signal.Notify(interrupt, os.Interrupt, os.Kill, syscall.SIGTERM) + + // Set up listener for defined host and port + listener, err := net.Listen("tcp", port) + if err != nil { + return "Possibly was a problem with the port binding", err + } + + // set up channel on which to send accepted connections + listen := make(chan net.Conn, 100) + go acceptConnection(listener, listen) + + // loop work cycle with accept connections or interrupt + // by system signal + for { + select { + case conn := <-listen: + go handleClient(conn) + case killSignal := <-interrupt: + stdlog.Println("Got signal:", killSignal) + stdlog.Println("Stoping listening on ", listener.Addr()) + listener.Close() + if killSignal == os.Interrupt { + return "Daemon was interrupted by system signal", nil + } + return "Daemon was killed", nil + } + } + + // never happen, but need to complete code + return usage, nil + } + + // Accept a client connection and collect it in a channel + func acceptConnection(listener net.Listener, listen chan<- net.Conn) { + for { + conn, err := listener.Accept() + if err != nil { + continue + } + listen <- conn + } + } + + func handleClient(client net.Conn) { + for { + buf := make([]byte, 4096) + numbytes, err := client.Read(buf) + if numbytes == 0 || err != nil { + return + } + client.Write(buf[:numbytes]) + } + } + + func init() { + stdlog = log.New(os.Stdout, "", log.Ldate|log.Ltime) + errlog = log.New(os.Stderr, "", log.Ldate|log.Ltime) + } + + func main() { + srv, err := daemon.New(name, description, daemon.SystemDaemon, dependencies...) + if err != nil { + errlog.Println("Error: ", err) + os.Exit(1) + } + service := &Service{srv} + status, err := service.Manage() + if err != nil { + errlog.Println(status, "\nError: ", err) + os.Exit(1) + } + fmt.Println(status) + } + +Go daemon +*/ +package daemon + +import ( + "errors" + "runtime" + "strings" +) + +// Status constants. 
+const ( + statNotInstalled = "Service not installed" +) + +// Daemon interface has a standard set of methods/commands +type Daemon interface { + // GetTemplate - gets service config template + GetTemplate() string + + // SetTemplate - sets service config template + SetTemplate(string) error + + // Install the service into the system + Install(args ...string) (string, error) + + // Remove the service and all corresponding files from the system + Remove() (string, error) + + // Start the service + Start() (string, error) + + // Stop the service + Stop() (string, error) + + // Status - check the service status + Status() (string, error) + + // Run - run executable service + Run(e Executable) (string, error) +} + +// Executable interface defines controlling methods of executable service +type Executable interface { + // Start - non-blocking start service + Start() + // Stop - non-blocking stop service + Stop() + // Run - blocking run service + Run() +} + +// Kind is type of the daemon +type Kind string + +const ( + // UserAgent is a user daemon that runs as the currently logged in user and + // stores its property list in the user’s individual LaunchAgents directory. + // In other words, per-user agents provided by the user. Valid for macOS only. + UserAgent Kind = "UserAgent" + + // GlobalAgent is a user daemon that runs as the currently logged in user and + // stores its property list in the users' global LaunchAgents directory. In + // other words, per-user agents provided by the administrator. Valid for macOS + // only. + GlobalAgent Kind = "GlobalAgent" + + // GlobalDaemon is a system daemon that runs as the root user and stores its + // property list in the global LaunchDaemons directory. In other words, + // system-wide daemons provided by the administrator. Valid for macOS only. + GlobalDaemon Kind = "GlobalDaemon" + + // SystemDaemon is a system daemon that runs as the root user. In other words, + // system-wide daemons provided by the administrator. 
Valid for FreeBSD, Linux + // and Windows only. + SystemDaemon Kind = "SystemDaemon" +) + +// New - Create a new daemon +// +// name: name of the service +// +// description: any explanation, what is the service, its purpose +// +// kind: what kind of daemon to create +func New(name, description string, kind Kind, dependencies ...string) (Daemon, error) { + switch runtime.GOOS { + case "darwin": + if kind == SystemDaemon { + return nil, errors.New("Invalid daemon kind specified") + } + case "freebsd": + if kind != SystemDaemon { + return nil, errors.New("Invalid daemon kind specified") + } + case "linux": + if kind != SystemDaemon { + return nil, errors.New("Invalid daemon kind specified") + } + case "windows": + if kind != SystemDaemon { + return nil, errors.New("Invalid daemon kind specified") + } + } + + return newDaemon(strings.Join(strings.Fields(name), "_"), description, kind, dependencies) +} diff --git a/vendor/github.com/takama/daemon/daemon_darwin.go b/vendor/github.com/takama/daemon/daemon_darwin.go new file mode 100644 index 000000000..0c993433e --- /dev/null +++ b/vendor/github.com/takama/daemon/daemon_darwin.go @@ -0,0 +1,250 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by +// license that can be found in the LICENSE file. 
+ +// Package daemon darwin (mac os x) version +package daemon + +import ( + "os" + "os/exec" + "os/user" + "path/filepath" + "regexp" + "text/template" +) + +// darwinRecord - standard record (struct) for darwin version of daemon package +type darwinRecord struct { + name string + description string + kind Kind + dependencies []string +} + +func newDaemon(name, description string, kind Kind, dependencies []string) (Daemon, error) { + + return &darwinRecord{name, description, kind, dependencies}, nil +} + +// Standard service path for system daemons +func (darwin *darwinRecord) servicePath() string { + var path string + + switch darwin.kind { + case UserAgent: + usr, _ := user.Current() + path = usr.HomeDir + "/Library/LaunchAgents/" + darwin.name + ".plist" + case GlobalAgent: + path = "/Library/LaunchAgents/" + darwin.name + ".plist" + case GlobalDaemon: + path = "/Library/LaunchDaemons/" + darwin.name + ".plist" + } + + return path +} + +// Is a service installed +func (darwin *darwinRecord) isInstalled() bool { + + if _, err := os.Stat(darwin.servicePath()); err == nil { + return true + } + + return false +} + +// Get executable path +func execPath() (string, error) { + return filepath.Abs(os.Args[0]) +} + +// Check service is running +func (darwin *darwinRecord) checkRunning() (string, bool) { + output, err := exec.Command("launchctl", "list", darwin.name).Output() + if err == nil { + if matched, err := regexp.MatchString(darwin.name, string(output)); err == nil && matched { + reg := regexp.MustCompile("PID\" = ([0-9]+);") + data := reg.FindStringSubmatch(string(output)) + if len(data) > 1 { + return "Service (pid " + data[1] + ") is running...", true + } + return "Service is running...", true + } + } + + return "Service is stopped", false +} + +// Install the service +func (darwin *darwinRecord) Install(args ...string) (string, error) { + installAction := "Install " + darwin.description + ":" + + ok, err := checkPrivileges() + if !ok && darwin.kind != 
UserAgent { + return installAction + failed, err + } + + srvPath := darwin.servicePath() + + if darwin.isInstalled() { + return installAction + failed, ErrAlreadyInstalled + } + + file, err := os.Create(srvPath) + if err != nil { + return installAction + failed, err + } + defer file.Close() + + execPatch, err := executablePath(darwin.name) + if err != nil { + return installAction + failed, err + } + + templ, err := template.New("propertyList").Parse(propertyList) + if err != nil { + return installAction + failed, err + } + + if err := templ.Execute( + file, + &struct { + Name, Path string + Args []string + }{darwin.name, execPatch, args}, + ); err != nil { + return installAction + failed, err + } + + return installAction + success, nil +} + +// Remove the service +func (darwin *darwinRecord) Remove() (string, error) { + removeAction := "Removing " + darwin.description + ":" + + ok, err := checkPrivileges() + if !ok && darwin.kind != UserAgent { + return removeAction + failed, err + } + + if !darwin.isInstalled() { + return removeAction + failed, ErrNotInstalled + } + + if err := os.Remove(darwin.servicePath()); err != nil { + return removeAction + failed, err + } + + return removeAction + success, nil +} + +// Start the service +func (darwin *darwinRecord) Start() (string, error) { + startAction := "Starting " + darwin.description + ":" + + ok, err := checkPrivileges() + if !ok && darwin.kind != UserAgent { + return startAction + failed, err + } + + if !darwin.isInstalled() { + return startAction + failed, ErrNotInstalled + } + + if _, ok := darwin.checkRunning(); ok { + return startAction + failed, ErrAlreadyRunning + } + + if err := exec.Command("launchctl", "load", darwin.servicePath()).Run(); err != nil { + return startAction + failed, err + } + + return startAction + success, nil +} + +// Stop the service +func (darwin *darwinRecord) Stop() (string, error) { + stopAction := "Stopping " + darwin.description + ":" + + ok, err := checkPrivileges() + if !ok && 
darwin.kind != UserAgent { + return stopAction + failed, err + } + + if !darwin.isInstalled() { + return stopAction + failed, ErrNotInstalled + } + + if _, ok := darwin.checkRunning(); !ok { + return stopAction + failed, ErrAlreadyStopped + } + + if err := exec.Command("launchctl", "unload", darwin.servicePath()).Run(); err != nil { + return stopAction + failed, err + } + + return stopAction + success, nil +} + +// Status - Get service status +func (darwin *darwinRecord) Status() (string, error) { + + ok, err := checkPrivileges() + if !ok && darwin.kind != UserAgent { + return "", err + } + + if !darwin.isInstalled() { + return statNotInstalled, ErrNotInstalled + } + + statusAction, _ := darwin.checkRunning() + + return statusAction, nil +} + +// Run - Run service +func (darwin *darwinRecord) Run(e Executable) (string, error) { + runAction := "Running " + darwin.description + ":" + e.Run() + return runAction + " completed.", nil +} + +// GetTemplate - gets service config template +func (linux *darwinRecord) GetTemplate() string { + return propertyList +} + +// SetTemplate - sets service config template +func (linux *darwinRecord) SetTemplate(tplStr string) error { + propertyList = tplStr + return nil +} + +var propertyList = ` + + + + KeepAlive + + Label + {{.Name}} + ProgramArguments + + {{.Path}} + {{range .Args}}{{.}} + {{end}} + + RunAtLoad + + WorkingDirectory + /usr/local/var + StandardErrorPath + /usr/local/var/log/{{.Name}}.err + StandardOutPath + /usr/local/var/log/{{.Name}}.log + + +` diff --git a/vendor/github.com/takama/daemon/daemon_freebsd.go b/vendor/github.com/takama/daemon/daemon_freebsd.go new file mode 100644 index 000000000..2d84d5f64 --- /dev/null +++ b/vendor/github.com/takama/daemon/daemon_freebsd.go @@ -0,0 +1,275 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by +// license that can be found in the LICENSE file. 
+ +package daemon + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "text/template" +) + +// systemVRecord - standard record (struct) for linux systemV version of daemon package +type bsdRecord struct { + name string + description string + kind Kind + dependencies []string +} + +// Standard service path for systemV daemons +func (bsd *bsdRecord) servicePath() string { + return "/usr/local/etc/rc.d/" + bsd.name +} + +// Is a service installed +func (bsd *bsdRecord) isInstalled() bool { + + if _, err := os.Stat(bsd.servicePath()); err == nil { + return true + } + + return false +} + +// Is a service is enabled +func (bsd *bsdRecord) isEnabled() (bool, error) { + rcConf, err := os.Open("/etc/rc.conf") + if err != nil { + fmt.Println("Error opening file:", err) + return false, err + } + defer rcConf.Close() + rcData, _ := ioutil.ReadAll(rcConf) + r, _ := regexp.Compile(`.*` + bsd.name + `_enable="YES".*`) + v := string(r.Find(rcData)) + var chrFound, sharpFound bool + for _, c := range v { + if c == '#' && !chrFound { + sharpFound = true + break + } else if !sharpFound && c != ' ' { + chrFound = true + break + } + } + return chrFound, nil +} + +func (bsd *bsdRecord) getCmd(cmd string) string { + if ok, err := bsd.isEnabled(); !ok || err != nil { + fmt.Println("Service is not enabled, using one" + cmd + " instead") + cmd = "one" + cmd + } + return cmd +} + +// Get the daemon properly +func newDaemon(name, description string, kind Kind, dependencies []string) (Daemon, error) { + return &bsdRecord{name, description, kind, dependencies}, nil +} + +func execPath() (name string, err error) { + name = os.Args[0] + if name[0] == '.' 
{ + name, err = filepath.Abs(name) + if err == nil { + name = filepath.Clean(name) + } + } else { + name, err = exec.LookPath(filepath.Clean(name)) + } + return name, err +} + +// Check service is running +func (bsd *bsdRecord) checkRunning() (string, bool) { + output, err := exec.Command("service", bsd.name, bsd.getCmd("status")).Output() + if err == nil { + if matched, err := regexp.MatchString(bsd.name, string(output)); err == nil && matched { + reg := regexp.MustCompile("pid ([0-9]+)") + data := reg.FindStringSubmatch(string(output)) + if len(data) > 1 { + return "Service (pid " + data[1] + ") is running...", true + } + return "Service is running...", true + } + } + + return "Service is stopped", false +} + +// Install the service +func (bsd *bsdRecord) Install(args ...string) (string, error) { + installAction := "Install " + bsd.description + ":" + + if ok, err := checkPrivileges(); !ok { + return installAction + failed, err + } + + srvPath := bsd.servicePath() + + if bsd.isInstalled() { + return installAction + failed, ErrAlreadyInstalled + } + + file, err := os.Create(srvPath) + if err != nil { + return installAction + failed, err + } + defer file.Close() + + execPatch, err := executablePath(bsd.name) + if err != nil { + return installAction + failed, err + } + + templ, err := template.New("bsdConfig").Parse(bsdConfig) + if err != nil { + return installAction + failed, err + } + + if err := templ.Execute( + file, + &struct { + Name, Description, Path, Args string + }{bsd.name, bsd.description, execPatch, strings.Join(args, " ")}, + ); err != nil { + return installAction + failed, err + } + + if err := os.Chmod(srvPath, 0755); err != nil { + return installAction + failed, err + } + + return installAction + success, nil +} + +// Remove the service +func (bsd *bsdRecord) Remove() (string, error) { + removeAction := "Removing " + bsd.description + ":" + + if ok, err := checkPrivileges(); !ok { + return removeAction + failed, err + } + + if !bsd.isInstalled() { + 
return removeAction + failed, ErrNotInstalled + } + + if err := os.Remove(bsd.servicePath()); err != nil { + return removeAction + failed, err + } + + return removeAction + success, nil +} + +// Start the service +func (bsd *bsdRecord) Start() (string, error) { + startAction := "Starting " + bsd.description + ":" + + if ok, err := checkPrivileges(); !ok { + return startAction + failed, err + } + + if !bsd.isInstalled() { + return startAction + failed, ErrNotInstalled + } + + if _, ok := bsd.checkRunning(); ok { + return startAction + failed, ErrAlreadyRunning + } + + if err := exec.Command("service", bsd.name, bsd.getCmd("start")).Run(); err != nil { + return startAction + failed, err + } + + return startAction + success, nil +} + +// Stop the service +func (bsd *bsdRecord) Stop() (string, error) { + stopAction := "Stopping " + bsd.description + ":" + + if ok, err := checkPrivileges(); !ok { + return stopAction + failed, err + } + + if !bsd.isInstalled() { + return stopAction + failed, ErrNotInstalled + } + + if _, ok := bsd.checkRunning(); !ok { + return stopAction + failed, ErrAlreadyStopped + } + + if err := exec.Command("service", bsd.name, bsd.getCmd("stop")).Run(); err != nil { + return stopAction + failed, err + } + + return stopAction + success, nil +} + +// Status - Get service status +func (bsd *bsdRecord) Status() (string, error) { + + if ok, err := checkPrivileges(); !ok { + return "", err + } + + if !bsd.isInstalled() { + return statNotInstalled, ErrNotInstalled + } + + statusAction, _ := bsd.checkRunning() + + return statusAction, nil +} + +// Run - Run service +func (bsd *bsdRecord) Run(e Executable) (string, error) { + runAction := "Running " + bsd.description + ":" + e.Run() + return runAction + " completed.", nil +} + +// GetTemplate - gets service config template +func (linux *bsdRecord) GetTemplate() string { + return bsdConfig +} + +// SetTemplate - sets service config template +func (linux *bsdRecord) SetTemplate(tplStr string) error { + 
bsdConfig = tplStr + return nil +} + +var bsdConfig = `#!/bin/sh +# +# PROVIDE: {{.Name}} +# REQUIRE: networking syslog +# KEYWORD: + +# Add the following lines to /etc/rc.conf to enable the {{.Name}}: +# +# {{.Name}}_enable="YES" +# + + +. /etc/rc.subr + +name="{{.Name}}" +rcvar="{{.Name}}_enable" +command="{{.Path}}" +pidfile="/var/run/$name.pid" + +start_cmd="/usr/sbin/daemon -p $pidfile -f $command {{.Args}}" +load_rc_config $name +run_rc_command "$1" +` diff --git a/vendor/github.com/takama/daemon/daemon_linux.go b/vendor/github.com/takama/daemon/daemon_linux.go new file mode 100644 index 000000000..524c7d8bb --- /dev/null +++ b/vendor/github.com/takama/daemon/daemon_linux.go @@ -0,0 +1,27 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by +// license that can be found in the LICENSE file. + +// Package daemon linux version +package daemon + +import ( + "os" +) + +// Get the daemon properly +func newDaemon(name, description string, kind Kind, dependencies []string) (Daemon, error) { + // newer subsystem must be checked first + if _, err := os.Stat("/run/systemd/system"); err == nil { + return &systemDRecord{name, description, kind, dependencies}, nil + } + if _, err := os.Stat("/sbin/initctl"); err == nil { + return &upstartRecord{name, description, kind, dependencies}, nil + } + return &systemVRecord{name, description, kind, dependencies}, nil +} + +// Get executable path +func execPath() (string, error) { + return os.Readlink("/proc/self/exe") +} diff --git a/vendor/github.com/takama/daemon/daemon_linux_systemd.go b/vendor/github.com/takama/daemon/daemon_linux_systemd.go new file mode 100644 index 000000000..d6e6cca68 --- /dev/null +++ b/vendor/github.com/takama/daemon/daemon_linux_systemd.go @@ -0,0 +1,227 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by +// license that can be found in the LICENSE file. 
+ +package daemon + +import ( + "os" + "os/exec" + "regexp" + "strings" + "text/template" +) + +// systemDRecord - standard record (struct) for linux systemD version of daemon package +type systemDRecord struct { + name string + description string + kind Kind + dependencies []string +} + +// Standard service path for systemD daemons +func (linux *systemDRecord) servicePath() string { + return "/etc/systemd/system/" + linux.name + ".service" +} + +// Is a service installed +func (linux *systemDRecord) isInstalled() bool { + + if _, err := os.Stat(linux.servicePath()); err == nil { + return true + } + + return false +} + +// Check service is running +func (linux *systemDRecord) checkRunning() (string, bool) { + output, err := exec.Command("systemctl", "status", linux.name+".service").Output() + if err == nil { + if matched, err := regexp.MatchString("Active: active", string(output)); err == nil && matched { + reg := regexp.MustCompile("Main PID: ([0-9]+)") + data := reg.FindStringSubmatch(string(output)) + if len(data) > 1 { + return "Service (pid " + data[1] + ") is running...", true + } + return "Service is running...", true + } + } + + return "Service is stopped", false +} + +// Install the service +func (linux *systemDRecord) Install(args ...string) (string, error) { + installAction := "Install " + linux.description + ":" + + if ok, err := checkPrivileges(); !ok { + return installAction + failed, err + } + + srvPath := linux.servicePath() + + if linux.isInstalled() { + return installAction + failed, ErrAlreadyInstalled + } + + file, err := os.Create(srvPath) + if err != nil { + return installAction + failed, err + } + defer file.Close() + + execPatch, err := executablePath(linux.name) + if err != nil { + return installAction + failed, err + } + + templ, err := template.New("systemDConfig").Parse(systemDConfig) + if err != nil { + return installAction + failed, err + } + + if err := templ.Execute( + file, + &struct { + Name, Description, Dependencies, Path, Args 
string + }{ + linux.name, + linux.description, + strings.Join(linux.dependencies, " "), + execPatch, + strings.Join(args, " "), + }, + ); err != nil { + return installAction + failed, err + } + + if err := exec.Command("systemctl", "daemon-reload").Run(); err != nil { + return installAction + failed, err + } + + if err := exec.Command("systemctl", "enable", linux.name+".service").Run(); err != nil { + return installAction + failed, err + } + + return installAction + success, nil +} + +// Remove the service +func (linux *systemDRecord) Remove() (string, error) { + removeAction := "Removing " + linux.description + ":" + + if ok, err := checkPrivileges(); !ok { + return removeAction + failed, err + } + + if !linux.isInstalled() { + return removeAction + failed, ErrNotInstalled + } + + if err := exec.Command("systemctl", "disable", linux.name+".service").Run(); err != nil { + return removeAction + failed, err + } + + if err := os.Remove(linux.servicePath()); err != nil { + return removeAction + failed, err + } + + return removeAction + success, nil +} + +// Start the service +func (linux *systemDRecord) Start() (string, error) { + startAction := "Starting " + linux.description + ":" + + if ok, err := checkPrivileges(); !ok { + return startAction + failed, err + } + + if !linux.isInstalled() { + return startAction + failed, ErrNotInstalled + } + + if _, ok := linux.checkRunning(); ok { + return startAction + failed, ErrAlreadyRunning + } + + if err := exec.Command("systemctl", "start", linux.name+".service").Run(); err != nil { + return startAction + failed, err + } + + return startAction + success, nil +} + +// Stop the service +func (linux *systemDRecord) Stop() (string, error) { + stopAction := "Stopping " + linux.description + ":" + + if ok, err := checkPrivileges(); !ok { + return stopAction + failed, err + } + + if !linux.isInstalled() { + return stopAction + failed, ErrNotInstalled + } + + if _, ok := linux.checkRunning(); !ok { + return stopAction + failed, 
ErrAlreadyStopped + } + + if err := exec.Command("systemctl", "stop", linux.name+".service").Run(); err != nil { + return stopAction + failed, err + } + + return stopAction + success, nil +} + +// Status - Get service status +func (linux *systemDRecord) Status() (string, error) { + + if ok, err := checkPrivileges(); !ok { + return "", err + } + + if !linux.isInstalled() { + return statNotInstalled, ErrNotInstalled + } + + statusAction, _ := linux.checkRunning() + + return statusAction, nil +} + +// Run - Run service +func (linux *systemDRecord) Run(e Executable) (string, error) { + runAction := "Running " + linux.description + ":" + e.Run() + return runAction + " completed.", nil +} + +// GetTemplate - gets service config template +func (linux *systemDRecord) GetTemplate() string { + return systemDConfig +} + +// SetTemplate - sets service config template +func (linux *systemDRecord) SetTemplate(tplStr string) error { + systemDConfig = tplStr + return nil +} + +var systemDConfig = `[Unit] +Description={{.Description}} +Requires={{.Dependencies}} +After={{.Dependencies}} + +[Service] +PIDFile=/var/run/{{.Name}}.pid +ExecStartPre=/bin/rm -f /var/run/{{.Name}}.pid +ExecStart={{.Path}} {{.Args}} +Restart=on-failure + +[Install] +WantedBy=multi-user.target +` diff --git a/vendor/github.com/takama/daemon/daemon_linux_systemv.go b/vendor/github.com/takama/daemon/daemon_linux_systemv.go new file mode 100644 index 000000000..588f6db80 --- /dev/null +++ b/vendor/github.com/takama/daemon/daemon_linux_systemv.go @@ -0,0 +1,332 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by +// license that can be found in the LICENSE file. 
+ +package daemon + +import ( + "os" + "os/exec" + "regexp" + "strings" + "text/template" +) + +// systemVRecord - standard record (struct) for linux systemV version of daemon package +type systemVRecord struct { + name string + description string + kind Kind + dependencies []string +} + +// Standard service path for systemV daemons +func (linux *systemVRecord) servicePath() string { + return "/etc/init.d/" + linux.name +} + +// Is a service installed +func (linux *systemVRecord) isInstalled() bool { + + if _, err := os.Stat(linux.servicePath()); err == nil { + return true + } + + return false +} + +// Check service is running +func (linux *systemVRecord) checkRunning() (string, bool) { + output, err := exec.Command("service", linux.name, "status").Output() + if err == nil { + if matched, err := regexp.MatchString(linux.name, string(output)); err == nil && matched { + reg := regexp.MustCompile("pid ([0-9]+)") + data := reg.FindStringSubmatch(string(output)) + if len(data) > 1 { + return "Service (pid " + data[1] + ") is running...", true + } + return "Service is running...", true + } + } + + return "Service is stopped", false +} + +// Install the service +func (linux *systemVRecord) Install(args ...string) (string, error) { + installAction := "Install " + linux.description + ":" + + if ok, err := checkPrivileges(); !ok { + return installAction + failed, err + } + + srvPath := linux.servicePath() + + if linux.isInstalled() { + return installAction + failed, ErrAlreadyInstalled + } + + file, err := os.Create(srvPath) + if err != nil { + return installAction + failed, err + } + defer file.Close() + + execPatch, err := executablePath(linux.name) + if err != nil { + return installAction + failed, err + } + + templ, err := template.New("systemVConfig").Parse(systemVConfig) + if err != nil { + return installAction + failed, err + } + + if err := templ.Execute( + file, + &struct { + Name, Description, Path, Args string + }{linux.name, linux.description, execPatch, 
strings.Join(args, " ")}, + ); err != nil { + return installAction + failed, err + } + + if err := os.Chmod(srvPath, 0755); err != nil { + return installAction + failed, err + } + + for _, i := range [...]string{"2", "3", "4", "5"} { + if err := os.Symlink(srvPath, "/etc/rc"+i+".d/S87"+linux.name); err != nil { + continue + } + } + for _, i := range [...]string{"0", "1", "6"} { + if err := os.Symlink(srvPath, "/etc/rc"+i+".d/K17"+linux.name); err != nil { + continue + } + } + + return installAction + success, nil +} + +// Remove the service +func (linux *systemVRecord) Remove() (string, error) { + removeAction := "Removing " + linux.description + ":" + + if ok, err := checkPrivileges(); !ok { + return removeAction + failed, err + } + + if !linux.isInstalled() { + return removeAction + failed, ErrNotInstalled + } + + if err := os.Remove(linux.servicePath()); err != nil { + return removeAction + failed, err + } + + for _, i := range [...]string{"2", "3", "4", "5"} { + if err := os.Remove("/etc/rc" + i + ".d/S87" + linux.name); err != nil { + continue + } + } + for _, i := range [...]string{"0", "1", "6"} { + if err := os.Remove("/etc/rc" + i + ".d/K17" + linux.name); err != nil { + continue + } + } + + return removeAction + success, nil +} + +// Start the service +func (linux *systemVRecord) Start() (string, error) { + startAction := "Starting " + linux.description + ":" + + if ok, err := checkPrivileges(); !ok { + return startAction + failed, err + } + + if !linux.isInstalled() { + return startAction + failed, ErrNotInstalled + } + + if _, ok := linux.checkRunning(); ok { + return startAction + failed, ErrAlreadyRunning + } + + if err := exec.Command("service", linux.name, "start").Run(); err != nil { + return startAction + failed, err + } + + return startAction + success, nil +} + +// Stop the service +func (linux *systemVRecord) Stop() (string, error) { + stopAction := "Stopping " + linux.description + ":" + + if ok, err := checkPrivileges(); !ok { + return 
stopAction + failed, err + } + + if !linux.isInstalled() { + return stopAction + failed, ErrNotInstalled + } + + if _, ok := linux.checkRunning(); !ok { + return stopAction + failed, ErrAlreadyStopped + } + + if err := exec.Command("service", linux.name, "stop").Run(); err != nil { + return stopAction + failed, err + } + + return stopAction + success, nil +} + +// Status - Get service status +func (linux *systemVRecord) Status() (string, error) { + + if ok, err := checkPrivileges(); !ok { + return "", err + } + + if !linux.isInstalled() { + return statNotInstalled, ErrNotInstalled + } + + statusAction, _ := linux.checkRunning() + + return statusAction, nil +} + +// Run - Run service +func (linux *systemVRecord) Run(e Executable) (string, error) { + runAction := "Running " + linux.description + ":" + e.Run() + return runAction + " completed.", nil +} + +// GetTemplate - gets service config template +func (linux *systemVRecord) GetTemplate() string { + return systemVConfig +} + +// SetTemplate - sets service config template +func (linux *systemVRecord) SetTemplate(tplStr string) error { + systemVConfig = tplStr + return nil +} + +var systemVConfig = `#! /bin/sh +# +# /etc/rc.d/init.d/{{.Name}} +# +# Starts {{.Name}} as a daemon +# +# chkconfig: 2345 87 17 +# description: Starts and stops a single {{.Name}} instance on this system + +### BEGIN INIT INFO +# Provides: {{.Name}} +# Required-Start: $network $named +# Required-Stop: $network $named +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: This service manages the {{.Description}}. +# Description: {{.Description}} +### END INIT INFO + +# +# Source function library. +# +if [ -f /etc/rc.d/init.d/functions ]; then + . 
/etc/rc.d/init.d/functions +fi + +exec="{{.Path}}" +servname="{{.Description}}" + +proc="{{.Name}}" +pidfile="/var/run/$proc.pid" +lockfile="/var/lock/subsys/$proc" +stdoutlog="/var/log/$proc.log" +stderrlog="/var/log/$proc.err" + +[ -d $(dirname $lockfile) ] || mkdir -p $(dirname $lockfile) + +[ -e /etc/sysconfig/$proc ] && . /etc/sysconfig/$proc + +start() { + [ -x $exec ] || exit 5 + + if [ -f $pidfile ]; then + if ! [ -d "/proc/$(cat $pidfile)" ]; then + rm $pidfile + if [ -f $lockfile ]; then + rm $lockfile + fi + fi + fi + + if ! [ -f $pidfile ]; then + printf "Starting $servname:\t" + echo "$(date)" >> $stdoutlog + $exec {{.Args}} >> $stdoutlog 2>> $stderrlog & + echo $! > $pidfile + touch $lockfile + success + echo + else + # failure + echo + printf "$pidfile still exists...\n" + exit 7 + fi +} + +stop() { + echo -n $"Stopping $servname: " + killproc -p $pidfile $proc + retval=$? + echo + [ $retval -eq 0 ] && rm -f $lockfile + return $retval +} + +restart() { + stop + start +} + +rh_status() { + status -p $pidfile $proc +} + +rh_status_q() { + rh_status >/dev/null 2>&1 +} + +case "$1" in + start) + rh_status_q && exit 0 + $1 + ;; + stop) + rh_status_q || exit 0 + $1 + ;; + restart) + $1 + ;; + status) + rh_status + ;; + *) + echo $"Usage: $0 {start|stop|status|restart}" + exit 2 +esac + +exit $? +` diff --git a/vendor/github.com/takama/daemon/daemon_linux_upstart.go b/vendor/github.com/takama/daemon/daemon_linux_upstart.go new file mode 100644 index 000000000..16216c7b1 --- /dev/null +++ b/vendor/github.com/takama/daemon/daemon_linux_upstart.go @@ -0,0 +1,212 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by +// license that can be found in the LICENSE file. 
+ +package daemon + +import ( + "os" + "os/exec" + "regexp" + "strings" + "text/template" +) + +// upstartRecord - standard record (struct) for linux upstart version of daemon package +type upstartRecord struct { + name string + description string + kind Kind + dependencies []string +} + +// Standard service path for systemV daemons +func (linux *upstartRecord) servicePath() string { + return "/etc/init/" + linux.name + ".conf" +} + +// Is a service installed +func (linux *upstartRecord) isInstalled() bool { + + if _, err := os.Stat(linux.servicePath()); err == nil { + return true + } + + return false +} + +// Check service is running +func (linux *upstartRecord) checkRunning() (string, bool) { + output, err := exec.Command("status", linux.name).Output() + if err == nil { + if matched, err := regexp.MatchString(linux.name+" start/running", string(output)); err == nil && matched { + reg := regexp.MustCompile("process ([0-9]+)") + data := reg.FindStringSubmatch(string(output)) + if len(data) > 1 { + return "Service (pid " + data[1] + ") is running...", true + } + return "Service is running...", true + } + } + + return "Service is stopped", false +} + +// Install the service +func (linux *upstartRecord) Install(args ...string) (string, error) { + installAction := "Install " + linux.description + ":" + + if ok, err := checkPrivileges(); !ok { + return installAction + failed, err + } + + srvPath := linux.servicePath() + + if linux.isInstalled() { + return installAction + failed, ErrAlreadyInstalled + } + + file, err := os.Create(srvPath) + if err != nil { + return installAction + failed, err + } + defer file.Close() + + execPatch, err := executablePath(linux.name) + if err != nil { + return installAction + failed, err + } + + templ, err := template.New("upstatConfig").Parse(upstatConfig) + if err != nil { + return installAction + failed, err + } + + if err := templ.Execute( + file, + &struct { + Name, Description, Path, Args string + }{linux.name, linux.description, 
execPatch, strings.Join(args, " ")}, + ); err != nil { + return installAction + failed, err + } + + if err := os.Chmod(srvPath, 0755); err != nil { + return installAction + failed, err + } + + return installAction + success, nil +} + +// Remove the service +func (linux *upstartRecord) Remove() (string, error) { + removeAction := "Removing " + linux.description + ":" + + if ok, err := checkPrivileges(); !ok { + return removeAction + failed, err + } + + if !linux.isInstalled() { + return removeAction + failed, ErrNotInstalled + } + + if err := os.Remove(linux.servicePath()); err != nil { + return removeAction + failed, err + } + + return removeAction + success, nil +} + +// Start the service +func (linux *upstartRecord) Start() (string, error) { + startAction := "Starting " + linux.description + ":" + + if ok, err := checkPrivileges(); !ok { + return startAction + failed, err + } + + if !linux.isInstalled() { + return startAction + failed, ErrNotInstalled + } + + if _, ok := linux.checkRunning(); ok { + return startAction + failed, ErrAlreadyRunning + } + + if err := exec.Command("start", linux.name).Run(); err != nil { + return startAction + failed, err + } + + return startAction + success, nil +} + +// Stop the service +func (linux *upstartRecord) Stop() (string, error) { + stopAction := "Stopping " + linux.description + ":" + + if ok, err := checkPrivileges(); !ok { + return stopAction + failed, err + } + + if !linux.isInstalled() { + return stopAction + failed, ErrNotInstalled + } + + if _, ok := linux.checkRunning(); !ok { + return stopAction + failed, ErrAlreadyStopped + } + + if err := exec.Command("stop", linux.name).Run(); err != nil { + return stopAction + failed, err + } + + return stopAction + success, nil +} + +// Status - Get service status +func (linux *upstartRecord) Status() (string, error) { + + if ok, err := checkPrivileges(); !ok { + return "", err + } + + if !linux.isInstalled() { + return statNotInstalled, ErrNotInstalled + } + + statusAction, _ 
:= linux.checkRunning() + + return statusAction, nil +} + +// Run - Run service +func (linux *upstartRecord) Run(e Executable) (string, error) { + runAction := "Running " + linux.description + ":" + e.Run() + return runAction + " completed.", nil +} + +// GetTemplate - gets service config template +func (linux *upstartRecord) GetTemplate() string { + return upstatConfig +} + +// SetTemplate - sets service config template +func (linux *upstartRecord) SetTemplate(tplStr string) error { + upstatConfig = tplStr + return nil +} + +var upstatConfig = `# {{.Name}} {{.Description}} + +description "{{.Description}}" +author "Pichu Chen " + +start on runlevel [2345] +stop on runlevel [016] + +respawn +#kill timeout 5 + +exec {{.Path}} {{.Args}} >> /var/log/{{.Name}}.log 2>> /var/log/{{.Name}}.err +` diff --git a/vendor/github.com/takama/daemon/daemon_windows.go b/vendor/github.com/takama/daemon/daemon_windows.go new file mode 100644 index 000000000..047b63532 --- /dev/null +++ b/vendor/github.com/takama/daemon/daemon_windows.go @@ -0,0 +1,355 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by +// license that can be found in the LICENSE file. 
+ +// Package daemon windows version +package daemon + +import ( + "errors" + "fmt" + "os/exec" + "strconv" + "syscall" + "time" + "unicode/utf16" + "unsafe" + + "golang.org/x/sys/windows/registry" + "golang.org/x/sys/windows/svc" + "golang.org/x/sys/windows/svc/mgr" +) + +// windowsRecord - standard record (struct) for windows version of daemon package +type windowsRecord struct { + name string + description string + kind Kind + dependencies []string +} + +func newDaemon(name, description string, kind Kind, dependencies []string) (Daemon, error) { + + return &windowsRecord{name, description, kind, dependencies}, nil +} + +// Install the service +func (windows *windowsRecord) Install(args ...string) (string, error) { + installAction := "Install " + windows.description + ":" + + execp, err := execPath() + + if err != nil { + return installAction + failed, err + } + + m, err := mgr.Connect() + if err != nil { + return installAction + failed, err + } + defer m.Disconnect() + + s, err := m.OpenService(windows.name) + if err == nil { + s.Close() + return installAction + failed, ErrAlreadyRunning + } + + s, err = m.CreateService(windows.name, execp, mgr.Config{ + DisplayName: windows.name, + Description: windows.description, + StartType: mgr.StartAutomatic, + Dependencies: windows.dependencies, + }, args...) 
+ if err != nil { + return installAction + failed, err + } + defer s.Close() + + // set recovery action for service + // restart after 5 seconds for the first 3 times + // restart after 1 minute, otherwise + r := []mgr.RecoveryAction{ + mgr.RecoveryAction{ + Type: mgr.ServiceRestart, + Delay: 5000 * time.Millisecond, + }, + mgr.RecoveryAction{ + Type: mgr.ServiceRestart, + Delay: 5000 * time.Millisecond, + }, + mgr.RecoveryAction{ + Type: mgr.ServiceRestart, + Delay: 5000 * time.Millisecond, + }, + mgr.RecoveryAction{ + Type: mgr.ServiceRestart, + Delay: 60000 * time.Millisecond, + }, + } + // set reset period as a day + s.SetRecoveryActions(r, uint32(86400)) + + return installAction + " completed.", nil +} + +// Remove the service +func (windows *windowsRecord) Remove() (string, error) { + removeAction := "Removing " + windows.description + ":" + + m, err := mgr.Connect() + if err != nil { + return removeAction + failed, getWindowsError(err) + } + defer m.Disconnect() + s, err := m.OpenService(windows.name) + if err != nil { + return removeAction + failed, getWindowsError(err) + } + defer s.Close() + err = s.Delete() + if err != nil { + return removeAction + failed, getWindowsError(err) + } + + return removeAction + " completed.", nil +} + +// Start the service +func (windows *windowsRecord) Start() (string, error) { + startAction := "Starting " + windows.description + ":" + + m, err := mgr.Connect() + if err != nil { + return startAction + failed, getWindowsError(err) + } + defer m.Disconnect() + s, err := m.OpenService(windows.name) + if err != nil { + return startAction + failed, getWindowsError(err) + } + defer s.Close() + if err = s.Start(); err != nil { + return startAction + failed, getWindowsError(err) + } + + return startAction + " completed.", nil +} + +// Stop the service +func (windows *windowsRecord) Stop() (string, error) { + stopAction := "Stopping " + windows.description + ":" + + m, err := mgr.Connect() + if err != nil { + return stopAction + 
failed, getWindowsError(err) + } + defer m.Disconnect() + s, err := m.OpenService(windows.name) + if err != nil { + return stopAction + failed, getWindowsError(err) + } + defer s.Close() + if err := stopAndWait(s); err != nil { + return stopAction + failed, getWindowsError(err) + } + + return stopAction + " completed.", nil +} + +func stopAndWait(s *mgr.Service) error { + // First stop the service. Then wait for the service to + // actually stop before starting it. + status, err := s.Control(svc.Stop) + if err != nil { + return err + } + + timeDuration := time.Millisecond * 50 + + timeout := time.After(getStopTimeout() + (timeDuration * 2)) + tick := time.NewTicker(timeDuration) + defer tick.Stop() + + for status.State != svc.Stopped { + select { + case <-tick.C: + status, err = s.Query() + if err != nil { + return err + } + case <-timeout: + break + } + } + return nil +} + +func getStopTimeout() time.Duration { + // For default and paths see https://support.microsoft.com/en-us/kb/146092 + defaultTimeout := time.Millisecond * 20000 + key, err := registry.OpenKey(registry.LOCAL_MACHINE, `SYSTEM\CurrentControlSet\Control`, registry.READ) + if err != nil { + return defaultTimeout + } + sv, _, err := key.GetStringValue("WaitToKillServiceTimeout") + if err != nil { + return defaultTimeout + } + v, err := strconv.Atoi(sv) + if err != nil { + return defaultTimeout + } + return time.Millisecond * time.Duration(v) +} + +// Status - Get service status +func (windows *windowsRecord) Status() (string, error) { + m, err := mgr.Connect() + if err != nil { + return "Getting status:" + failed, getWindowsError(err) + } + defer m.Disconnect() + s, err := m.OpenService(windows.name) + if err != nil { + return "Getting status:" + failed, getWindowsError(err) + } + defer s.Close() + status, err := s.Query() + if err != nil { + return "Getting status:" + failed, getWindowsError(err) + } + + return "Status: " + getWindowsServiceStateFromUint32(status.State), nil +} + +// Get executable 
path +func execPath() (string, error) { + var n uint32 + b := make([]uint16, syscall.MAX_PATH) + size := uint32(len(b)) + + r0, _, e1 := syscall.MustLoadDLL( + "kernel32.dll", + ).MustFindProc( + "GetModuleFileNameW", + ).Call(0, uintptr(unsafe.Pointer(&b[0])), uintptr(size)) + n = uint32(r0) + if n == 0 { + return "", e1 + } + return string(utf16.Decode(b[0:n])), nil +} + +// Get windows error +func getWindowsError(inputError error) error { + if exiterr, ok := inputError.(*exec.ExitError); ok { + if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { + if sysErr, ok := WinErrCode[status.ExitStatus()]; ok { + return errors.New(fmt.Sprintf("\n %s: %s \n %s", sysErr.Title, sysErr.Description, sysErr.Action)) + } + } + } + + return inputError +} + +// Get windows service state +func getWindowsServiceStateFromUint32(state svc.State) string { + switch state { + case svc.Stopped: + return "SERVICE_STOPPED" + case svc.StartPending: + return "SERVICE_START_PENDING" + case svc.StopPending: + return "SERVICE_STOP_PENDING" + case svc.Running: + return "SERVICE_RUNNING" + case svc.ContinuePending: + return "SERVICE_CONTINUE_PENDING" + case svc.PausePending: + return "SERVICE_PAUSE_PENDING" + case svc.Paused: + return "SERVICE_PAUSED" + } + return "SERVICE_UNKNOWN" +} + +type serviceHandler struct { + executable Executable +} + +func (sh *serviceHandler) Execute(args []string, r <-chan svc.ChangeRequest, changes chan<- svc.Status) (ssec bool, errno uint32) { + const cmdsAccepted = svc.AcceptStop | svc.AcceptShutdown | svc.AcceptPauseAndContinue + changes <- svc.Status{State: svc.StartPending} + + fasttick := time.Tick(500 * time.Millisecond) + slowtick := time.Tick(2 * time.Second) + tick := fasttick + + sh.executable.Start() + changes <- svc.Status{State: svc.Running, Accepts: cmdsAccepted} + +loop: + for { + select { + case <-tick: + break + case c := <-r: + switch c.Cmd { + case svc.Interrogate: + changes <- c.CurrentStatus + // Testing deadlock from 
https://code.google.com/p/winsvc/issues/detail?id=4 + time.Sleep(100 * time.Millisecond) + changes <- c.CurrentStatus + case svc.Stop, svc.Shutdown: + changes <- svc.Status{State: svc.StopPending} + sh.executable.Stop() + break loop + case svc.Pause: + changes <- svc.Status{State: svc.Paused, Accepts: cmdsAccepted} + tick = slowtick + case svc.Continue: + changes <- svc.Status{State: svc.Running, Accepts: cmdsAccepted} + tick = fasttick + default: + continue loop + } + } + } + return +} + +func (windows *windowsRecord) Run(e Executable) (string, error) { + runAction := "Running " + windows.description + ":" + + interactive, err := svc.IsAnInteractiveSession() + if err != nil { + return runAction + failed, getWindowsError(err) + } + if !interactive { + // service called from windows service manager + // use API provided by golang.org/x/sys/windows + err = svc.Run(windows.name, &serviceHandler{ + executable: e, + }) + if err != nil { + return runAction + failed, getWindowsError(err) + } + } else { + // otherwise, service should be called from terminal session + e.Run() + } + + return runAction + " completed.", nil +} + +// GetTemplate - gets service config template +func (linux *windowsRecord) GetTemplate() string { + return "" +} + +// SetTemplate - sets service config template +func (linux *windowsRecord) SetTemplate(tplStr string) error { + return errors.New(fmt.Sprintf("templating is not supported for windows")) +} diff --git a/vendor/github.com/takama/daemon/helper.go b/vendor/github.com/takama/daemon/helper.go new file mode 100644 index 000000000..e4bd4c15e --- /dev/null +++ b/vendor/github.com/takama/daemon/helper.go @@ -0,0 +1,70 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by +// license that can be found in the LICENSE file. 
+ +//+build go1.8 + +package daemon + +import ( + "errors" + "os" + "os/exec" + "strconv" + "strings" +) + +// Service constants +const ( + success = "\t\t\t\t\t[ \033[32mOK\033[0m ]" // Show colored "OK" + failed = "\t\t\t\t\t[\033[31mFAILED\033[0m]" // Show colored "FAILED" +) + +var ( + // ErrUnsupportedSystem appears if try to use service on system which is not supported by this release + ErrUnsupportedSystem = errors.New("Unsupported system") + + // ErrRootPrivileges appears if run installation or deleting the service without root privileges + ErrRootPrivileges = errors.New("You must have root user privileges. Possibly using 'sudo' command should help") + + // ErrAlreadyInstalled appears if service already installed on the system + ErrAlreadyInstalled = errors.New("Service has already been installed") + + // ErrNotInstalled appears if try to delete service which was not been installed + ErrNotInstalled = errors.New("Service is not installed") + + // ErrAlreadyRunning appears if try to start already running service + ErrAlreadyRunning = errors.New("Service is already running") + + // ErrAlreadyStopped appears if try to stop already stopped service + ErrAlreadyStopped = errors.New("Service has already been stopped") +) + +// ExecPath tries to get executable path +func ExecPath() (string, error) { + return os.Executable() +} + +// Lookup path for executable file +func executablePath(name string) (string, error) { + if path, err := exec.LookPath(name); err == nil { + if _, err := os.Stat(path); err == nil { + return path, nil + } + } + return os.Executable() +} + +// Check root rights to use system service +func checkPrivileges() (bool, error) { + + if output, err := exec.Command("id", "-g").Output(); err == nil { + if gid, parseErr := strconv.ParseUint(strings.TrimSpace(string(output)), 10, 32); parseErr == nil { + if gid == 0 { + return true, nil + } + return false, ErrRootPrivileges + } + } + return false, ErrUnsupportedSystem +} diff --git 
a/vendor/github.com/takama/daemon/helper_legacy.go b/vendor/github.com/takama/daemon/helper_legacy.go new file mode 100644 index 000000000..f52cc98b3 --- /dev/null +++ b/vendor/github.com/takama/daemon/helper_legacy.go @@ -0,0 +1,70 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by +// license that can be found in the LICENSE file. + +//+build !go1.8 + +package daemon + +import ( + "errors" + "os" + "os/exec" + "strconv" + "strings" +) + +// Service constants +const ( + success = "\t\t\t\t\t[ \033[32mOK\033[0m ]" // Show colored "OK" + failed = "\t\t\t\t\t[\033[31mFAILED\033[0m]" // Show colored "FAILED" +) + +var ( + // ErrUnsupportedSystem appears if try to use service on system which is not supported by this release + ErrUnsupportedSystem = errors.New("Unsupported system") + + // ErrRootPrivileges appears if run installation or deleting the service without root privileges + ErrRootPrivileges = errors.New("You must have root user privileges. 
Possibly using 'sudo' command should help") + + // ErrAlreadyInstalled appears if service already installed on the system + ErrAlreadyInstalled = errors.New("Service has already been installed") + + // ErrNotInstalled appears if try to delete service which was not been installed + ErrNotInstalled = errors.New("Service is not installed") + + // ErrAlreadyRunning appears if try to start already running service + ErrAlreadyRunning = errors.New("Service is already running") + + // ErrAlreadyStopped appears if try to stop already stopped service + ErrAlreadyStopped = errors.New("Service has already been stopped") +) + +// ExecPath tries to get executable path +func ExecPath() (string, error) { + return execPath() +} + +// Lookup path for executable file +func executablePath(name string) (string, error) { + if path, err := exec.LookPath(name); err == nil { + if _, err := os.Stat(path); err == nil { + return path, nil + } + } + return execPath() +} + +// Check root rights to use system service +func checkPrivileges() (bool, error) { + + if output, err := exec.Command("id", "-g").Output(); err == nil { + if gid, parseErr := strconv.ParseUint(strings.TrimSpace(string(output)), 10, 32); parseErr == nil { + if gid == 0 { + return true, nil + } + return false, ErrRootPrivileges + } + } + return false, ErrUnsupportedSystem +} diff --git a/vendor/github.com/takama/daemon/helper_windows.go b/vendor/github.com/takama/daemon/helper_windows.go new file mode 100644 index 000000000..2c176a381 --- /dev/null +++ b/vendor/github.com/takama/daemon/helper_windows.go @@ -0,0 +1,128 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by +// license that can be found in the LICENSE file. 
+ +package daemon + +// SystemError contains error description and corresponded action helper to fix it +type SystemError struct { + Title string + Description string + Action string +} + +var ( + // WinErrCode - List of system errors from Microsoft source: + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms681385(v=vs.85).aspx + WinErrCode = map[int]SystemError{ + 5: SystemError{ + Title: "ERROR_ACCESS_DENIED", + Description: "Access denied.", + Action: "Administrator access is needed to install a service.", + }, + 1051: SystemError{ + Title: "ERROR_DEPENDENT_SERVICES_RUNNING", + Description: "A stop control has been sent to a service that other running services are dependent on.", + }, + 1052: SystemError{ + Title: "ERROR_INVALID_SERVICE_CONTROL", + Description: "The requested control is not valid for this service.", + }, + 1053: SystemError{ + Title: "ERROR_SERVICE_REQUEST_TIMEOUT", + Description: "The service did not respond to the start or control request in a timely fashion.", + }, + 1054: SystemError{ + Title: "ERROR_SERVICE_NO_THREAD", + Description: "A thread could not be created for the service.", + }, + 1055: SystemError{ + Title: "ERROR_SERVICE_DATABASE_LOCKED", + Description: "The service database is locked.", + }, + 1056: SystemError{ + Title: "ERROR_SERVICE_ALREADY_RUNNING", + Description: "An instance of the service is already running.", + }, + 1057: SystemError{ + Title: "ERROR_INVALID_SERVICE_ACCOUNT", + Description: "The account name is invalid or does not exist, or the password is invalid for the account name specified.", + }, + 1058: SystemError{ + Title: "ERROR_SERVICE_DISABLED", + Description: "The service cannot be started, either because it is disabled or because it has no enabled devices associated with it.", + }, + 1060: SystemError{ + Title: "ERROR_SERVICE_DOES_NOT_EXIST", + Description: "The specified service does not exist as an installed service.", + }, + 1061: SystemError{ + Title: "ERROR_SERVICE_CANNOT_ACCEPT_CTRL", + 
Description: "The service cannot accept control messages at this time.", + }, + 1062: SystemError{ + Title: "ERROR_SERVICE_NOT_ACTIVE", + Description: "The service has not been started.", + }, + 1063: SystemError{ + Title: "ERROR_FAILED_SERVICE_CONTROLLER_CONNECT", + Description: "The service process could not connect to the service controller.", + }, + 1064: SystemError{ + Title: "ERROR_EXCEPTION_IN_SERVICE", + Description: "An exception occurred in the service when handling the control request.", + }, + 1066: SystemError{ + Title: "ERROR_SERVICE_SPECIFIC_ERROR", + Description: "The service has returned a service-specific error code.", + }, + 1068: SystemError{ + Title: "ERROR_SERVICE_DEPENDENCY_FAIL", + Description: "The dependency service or group failed to start.", + }, + 1069: SystemError{ + Title: "ERROR_SERVICE_LOGON_FAILED", + Description: "The service did not start due to a logon failure.", + }, + 1070: SystemError{ + Title: "ERROR_SERVICE_START_HANG", + Description: "After starting, the service hung in a start-pending state.", + }, + 1071: SystemError{ + Title: "ERROR_INVALID_SERVICE_LOCK", + Description: "The specified service database lock is invalid.", + }, + 1072: SystemError{ + Title: "ERROR_SERVICE_MARKED_FOR_DELETE", + Description: "The specified service has been marked for deletion.", + }, + 1073: SystemError{ + Title: "ERROR_SERVICE_EXISTS", + Description: "The specified service already exists.", + }, + 1075: SystemError{ + Title: "ERROR_SERVICE_DEPENDENCY_DELETED", + Description: "The dependency service does not exist or has been marked for deletion.", + }, + 1077: SystemError{ + Title: "ERROR_SERVICE_NEVER_STARTED", + Description: "No attempts to start the service have been made since the last boot.", + }, + 1078: SystemError{ + Title: "ERROR_DUPLICATE_SERVICE_NAME", + Description: "The name is already in use as either a service name or a service display name.", + }, + 1079: SystemError{ + Title: "ERROR_DIFFERENT_SERVICE_ACCOUNT", + 
Description: "The account specified for this service is different from the account specified for other services running in the same process.", + }, + 1083: SystemError{ + Title: "ERROR_SERVICE_NOT_IN_EXE", + Description: "The executable program that this service is configured to run in does not implement the service.", + }, + 1084: SystemError{ + Title: "ERROR_NOT_SAFEBOOT_SERVICE", + Description: "This service cannot be started in Safe Mode.", + }, + } +) diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go new file mode 100644 index 000000000..2c033dff4 --- /dev/null +++ b/vendor/golang.org/x/exp/constraints/constraints.go @@ -0,0 +1,50 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package constraints defines a set of useful constraints to be used +// with type parameters. +package constraints + +// Signed is a constraint that permits any signed integer type. +// If future releases of Go add new predeclared signed integer types, +// this constraint will be modified to include them. +type Signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 +} + +// Unsigned is a constraint that permits any unsigned integer type. +// If future releases of Go add new predeclared unsigned integer types, +// this constraint will be modified to include them. +type Unsigned interface { + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +// Integer is a constraint that permits any integer type. +// If future releases of Go add new predeclared integer types, +// this constraint will be modified to include them. +type Integer interface { + Signed | Unsigned +} + +// Float is a constraint that permits any floating-point type. +// If future releases of Go add new predeclared floating-point types, +// this constraint will be modified to include them. 
+type Float interface { + ~float32 | ~float64 +} + +// Complex is a constraint that permits any complex numeric type. +// If future releases of Go add new predeclared complex numeric types, +// this constraint will be modified to include them. +type Complex interface { + ~complex64 | ~complex128 +} + +// Ordered is a constraint that permits any ordered type: any type +// that supports the operators < <= >= >. +// If future releases of Go add new ordered types, +// this constraint will be modified to include them. +type Ordered interface { + Integer | Float | ~string +} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go new file mode 100644 index 000000000..8a237c5d6 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -0,0 +1,218 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package slices defines various functions useful with slices of any type. +// Unless otherwise specified, these functions all apply to the elements +// of a slice at index 0 <= i < len(s). +// +// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a +// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings), +// or the sorting may fail to sort correctly. A common case is when sorting slices of +// floating-point numbers containing NaN values. +package slices + +import "golang.org/x/exp/constraints" + +// Equal reports whether two slices are equal: the same length and all +// elements equal. If the lengths are different, Equal returns false. +// Otherwise, the elements are compared in increasing index order, and the +// comparison stops at the first unequal pair. +// Floating point NaNs are not considered equal. 
+func Equal[E comparable](s1, s2 []E) bool { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + if s1[i] != s2[i] { + return false + } + } + return true +} + +// EqualFunc reports whether two slices are equal using a comparison +// function on each pair of elements. If the lengths are different, +// EqualFunc returns false. Otherwise, the elements are compared in +// increasing index order, and the comparison stops at the first index +// for which eq returns false. +func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { + if len(s1) != len(s2) { + return false + } + for i, v1 := range s1 { + v2 := s2[i] + if !eq(v1, v2) { + return false + } + } + return true +} + +// Compare compares the elements of s1 and s2. +// The elements are compared sequentially, starting at index 0, +// until one element is not equal to the other. +// The result of comparing the first non-matching elements is returned. +// If both slices are equal until one of them ends, the shorter slice is +// considered less than the longer one. +// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. +// Comparisons involving floating point NaNs are ignored. +func Compare[E constraints.Ordered](s1, s2 []E) int { + s2len := len(s2) + for i, v1 := range s1 { + if i >= s2len { + return +1 + } + v2 := s2[i] + switch { + case v1 < v2: + return -1 + case v1 > v2: + return +1 + } + } + if len(s1) < s2len { + return -1 + } + return 0 +} + +// CompareFunc is like Compare but uses a comparison function +// on each pair of elements. The elements are compared in increasing +// index order, and the comparisons stop after the first time cmp +// returns non-zero. +// The result is the first non-zero result of cmp; if cmp always +// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), +// and +1 if len(s1) > len(s2). 
+func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { + s2len := len(s2) + for i, v1 := range s1 { + if i >= s2len { + return +1 + } + v2 := s2[i] + if c := cmp(v1, v2); c != 0 { + return c + } + } + if len(s1) < s2len { + return -1 + } + return 0 +} + +// Index returns the index of the first occurrence of v in s, +// or -1 if not present. +func Index[E comparable](s []E, v E) int { + for i, vs := range s { + if v == vs { + return i + } + } + return -1 +} + +// IndexFunc returns the first index i satisfying f(s[i]), +// or -1 if none do. +func IndexFunc[E any](s []E, f func(E) bool) int { + for i, v := range s { + if f(v) { + return i + } + } + return -1 +} + +// Contains reports whether v is present in s. +func Contains[E comparable](s []E, v E) bool { + return Index(s, v) >= 0 +} + +// Insert inserts the values v... into s at index i, +// returning the modified slice. +// In the returned slice r, r[i] == v[0]. +// Insert panics if i is out of range. +// This function is O(len(s) + len(v)). +func Insert[S ~[]E, E any](s S, i int, v ...E) S { + tot := len(s) + len(v) + if tot <= cap(s) { + s2 := s[:tot] + copy(s2[i+len(v):], s[i:]) + copy(s2[i:], v) + return s2 + } + s2 := make(S, tot) + copy(s2, s[:i]) + copy(s2[i:], v) + copy(s2[i+len(v):], s[i:]) + return s2 +} + +// Delete removes the elements s[i:j] from s, returning the modified slice. +// Delete panics if s[i:j] is not a valid slice of s. +// Delete modifies the contents of the slice s; it does not create a new slice. +// Delete is O(len(s)-(j-i)), so if many items must be deleted, it is better to +// make a single call deleting them all together than to delete one at a time. +func Delete[S ~[]E, E any](s S, i, j int) S { + return append(s[:i], s[j:]...) +} + +// Clone returns a copy of the slice. +// The elements are copied using assignment, so this is a shallow clone. +func Clone[S ~[]E, E any](s S) S { + // Preserve nil in case it matters. 
+ if s == nil { + return nil + } + return append(S([]E{}), s...) +} + +// Compact replaces consecutive runs of equal elements with a single copy. +// This is like the uniq command found on Unix. +// Compact modifies the contents of the slice s; it does not create a new slice. +func Compact[S ~[]E, E comparable](s S) S { + if len(s) == 0 { + return s + } + i := 1 + last := s[0] + for _, v := range s[1:] { + if v != last { + s[i] = v + i++ + last = v + } + } + return s[:i] +} + +// CompactFunc is like Compact but uses a comparison function. +func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { + if len(s) == 0 { + return s + } + i := 1 + last := s[0] + for _, v := range s[1:] { + if !eq(v, last) { + s[i] = v + i++ + last = v + } + } + return s[:i] +} + +// Grow increases the slice's capacity, if necessary, to guarantee space for +// another n elements. After Grow(n), at least n elements can be appended +// to the slice without another allocation. Grow may modify elements of the +// slice between the length and the capacity. If n is negative or too large to +// allocate the memory, Grow panics. +func Grow[S ~[]E, E any](s S, n int) S { + return append(s, make(S, n)...)[:len(s)] +} + +// Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. +func Clip[S ~[]E, E any](s S) S { + return s[:len(s):len(s)] +} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go new file mode 100644 index 000000000..c22e74bd1 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -0,0 +1,127 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import ( + "math/bits" + + "golang.org/x/exp/constraints" +) + +// Sort sorts a slice of any ordered type in ascending order. 
+// Sort may fail to sort correctly when sorting slices of floating-point +// numbers containing Not-a-number (NaN) values. +// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))}) +// instead if the input may contain NaNs. +func Sort[E constraints.Ordered](x []E) { + n := len(x) + pdqsortOrdered(x, 0, n, bits.Len(uint(n))) +} + +// SortFunc sorts the slice x in ascending order as determined by the less function. +// This sort is not guaranteed to be stable. +// +// SortFunc requires that less is a strict weak ordering. +// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. +func SortFunc[E any](x []E, less func(a, b E) bool) { + n := len(x) + pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less) +} + +// SortStable sorts the slice x while keeping the original order of equal +// elements, using less to compare elements. +func SortStableFunc[E any](x []E, less func(a, b E) bool) { + stableLessFunc(x, len(x), less) +} + +// IsSorted reports whether x is sorted in ascending order. +func IsSorted[E constraints.Ordered](x []E) bool { + for i := len(x) - 1; i > 0; i-- { + if x[i] < x[i-1] { + return false + } + } + return true +} + +// IsSortedFunc reports whether x is sorted in ascending order, with less as the +// comparison function. +func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool { + for i := len(x) - 1; i > 0; i-- { + if less(x[i], x[i-1]) { + return false + } + } + return true +} + +// BinarySearch searches for target in a sorted slice and returns the position +// where target is found, or the position where target would appear in the +// sort order; it also returns a bool saying whether the target is really found +// in the slice. The slice must be sorted in increasing order. +func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { + // search returns the leftmost position where f returns true, or len(x) if f + // returns false for all x. 
This is the insertion position for target in x, + // and could point to an element that's either == target or not. + pos := search(len(x), func(i int) bool { return x[i] >= target }) + if pos >= len(x) || x[pos] != target { + return pos, false + } else { + return pos, true + } +} + +// BinarySearchFunc works like BinarySearch, but uses a custom comparison +// function. The slice must be sorted in increasing order, where "increasing" is +// defined by cmp. cmp(a, b) is expected to return an integer comparing the two +// parameters: 0 if a == b, a negative number if a < b and a positive number if +// a > b. +func BinarySearchFunc[E any](x []E, target E, cmp func(E, E) int) (int, bool) { + pos := search(len(x), func(i int) bool { return cmp(x[i], target) >= 0 }) + if pos >= len(x) || cmp(x[pos], target) != 0 { + return pos, false + } else { + return pos, true + } +} + +func search(n int, f func(int) bool) int { + // Define f(-1) == false and f(n) == true. + // Invariant: f(i-1) == false, f(j) == true. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if !f(h) { + i = h + 1 // preserves f(i-1) == false + } else { + j = h // preserves f(j) == true + } + } + // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i. 
+ return i +} + +type sortedHint int // hint for pdqsort when choosing the pivot + +const ( + unknownHint sortedHint = iota + increasingHint + decreasingHint +) + +// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf +type xorshift uint64 + +func (r *xorshift) Next() uint64 { + *r ^= *r << 13 + *r ^= *r >> 17 + *r ^= *r << 5 + return uint64(*r) +} + +func nextPowerOfTwo(length int) uint { + return 1 << bits.Len(uint(length)) +} diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortfunc.go new file mode 100644 index 000000000..2a632476c --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortfunc.go @@ -0,0 +1,479 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +// insertionSortLessFunc sorts data[a:b] using insertion sort. +func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + for i := a + 1; i < b; i++ { + for j := i; j > a && less(data[j], data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownLessFunc implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. +func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && less(data[first+child], data[first+child+1]) { + child++ + } + if !less(data[first+root], data[first+child]) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. 
+ for i := (hi - 1) / 2; i >= 0; i-- { + siftDownLessFunc(data, i, hi, first, less) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownLessFunc(data, lo, i, first, less) + } +} + +// pdqsortLessFunc sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. +func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortLessFunc(data, a, b, less) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortLessFunc(data, a, b, less) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. + if !wasBalanced { + breakPatternsLessFunc(data, a, b, less) + limit-- + } + + pivot, hint := choosePivotLessFunc(data, a, b, less) + if hint == decreasingHint { + reverseRangeLessFunc(data, a, b, less) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. 
+ if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortLessFunc(data, a, b, less) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !less(data[a-1], data[pivot]) { + mid := partitionEqualLessFunc(data, a, b, pivot, less) + a = mid + continue + } + + mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortLessFunc(data, a, mid, limit, less) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortLessFunc(data, mid+1, b, limit, less) + b = mid + } + } +} + +// partitionLessFunc does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]

=p for inewpivot. +// On return, data[newpivot] = p +func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for i <= j && less(data[i], data[a]) { + i++ + } + for i <= j && !less(data[j], data[a]) { + j-- + } + if i > j { + data[j], data[a] = data[a], data[j] + return j, true + } + data[i], data[j] = data[j], data[i] + i++ + j-- + + for { + for i <= j && less(data[i], data[a]) { + i++ + } + for i <= j && !less(data[j], data[a]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + data[j], data[a] = data[a], data[j] + return j, false +} + +// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. +func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for { + for i <= j && !less(data[a], data[i]) { + i++ + } + for i <= j && less(data[a], data[j]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + return i +} + +// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end. 
+func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool { + const ( + maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted + shortestShifting = 50 // don't shift any elements on short arrays + ) + i := a + 1 + for j := 0; j < maxSteps; j++ { + for i < b && !less(data[i], data[i-1]) { + i++ + } + + if i == b { + return true + } + + if b-a < shortestShifting { + return false + } + + data[i], data[i-1] = data[i-1], data[i] + + // Shift the smaller one to the left. + if i-a >= 2 { + for j := i - 1; j >= 1; j-- { + if !less(data[j], data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + // Shift the greater one to the right. + if b-i >= 2 { + for j := i + 1; j < b; j++ { + if !less(data[j], data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + } + return false +} + +// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns +// that might cause imbalanced partitions in quicksort. +func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + length := b - a + if length >= 8 { + random := xorshift(length) + modulus := nextPowerOfTwo(length) + + for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { + other := int(uint(random.Next()) & (modulus - 1)) + if other >= length { + other -= length + } + data[idx], data[a+other] = data[a+other], data[idx] + } + } +} + +// choosePivotLessFunc chooses a pivot in data[a:b]. +// +// [0,8): chooses a static pivot. +// [8,shortestNinther): uses the simple median-of-three method. +// [shortestNinther,∞): uses the Tukey ninther method. 
+func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. + i = medianAdjacentLessFunc(data, i, &swaps, less) + j = medianAdjacentLessFunc(data, j, &swaps, less) + k = medianAdjacentLessFunc(data, k, &swaps, less) + } + // Find the median among i, j, k and stores it into j. + j = medianLessFunc(data, i, j, k, &swaps, less) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) { + if less(data[b], data[a]) { + *swaps++ + return b, a + } + return a, b +} + +// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int { + a, b = order2LessFunc(data, a, b, swaps, less) + b, c = order2LessFunc(data, b, c, swaps, less) + a, b = order2LessFunc(data, a, b, swaps, less) + return b +} + +// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. 
+func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int { + return medianLessFunc(data, a-1, a, a+1, swaps, less) +} + +func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortLessFunc(data, a, b, less) + a = b + b += blockSize + } + insertionSortLessFunc(data, a, n, less) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeLessFunc(data, a, a+blockSize, b, less) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeLessFunc(data, a, m, n, less) + } + blockSize *= 2 + } +} + +// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. 
+// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. +func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. + i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if less(data[h], data[a]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !less(data[m], data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. 
+ for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !less(data[p-c], data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateLessFunc(data, start, m, end, less) + } + if a < start && start < mid { + symMergeLessFunc(data, a, start, mid, less) + } + if mid < end && end < b { + symMergeLessFunc(data, mid, end, b, less) + } +} + +// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeLessFunc(data, m-i, m, j, less) + i -= j + } else { + swapRangeLessFunc(data, m-i, m+j-i, i, less) + j -= i + } + } + // i == j + swapRangeLessFunc(data, m-i, m, i, less) +} diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go new file mode 100644 index 000000000..efaa1c8b7 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortordered.go @@ -0,0 +1,481 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// insertionSortOrdered sorts data[a:b] using insertion sort. 
+func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && (data[j] < data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownOrdered implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. +func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && (data[first+child] < data[first+child+1]) { + child++ + } + if !(data[first+root] < data[first+child]) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortOrdered[E constraints.Ordered](data []E, a, b int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownOrdered(data, i, hi, first) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownOrdered(data, lo, i, first) + } +} + +// pdqsortOrdered sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. 
+func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortOrdered(data, a, b) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortOrdered(data, a, b) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. + if !wasBalanced { + breakPatternsOrdered(data, a, b) + limit-- + } + + pivot, hint := choosePivotOrdered(data, a, b) + if hint == decreasingHint { + reverseRangeOrdered(data, a, b) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. + if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortOrdered(data, a, b) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !(data[a-1] < data[pivot]) { + mid := partitionEqualOrdered(data, a, b, pivot) + a = mid + continue + } + + mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortOrdered(data, a, mid, limit) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortOrdered(data, mid+1, b, limit) + b = mid + } + } +} + +// partitionOrdered does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]

=p for inewpivot. +// On return, data[newpivot] = p +func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for i <= j && (data[i] < data[a]) { + i++ + } + for i <= j && !(data[j] < data[a]) { + j-- + } + if i > j { + data[j], data[a] = data[a], data[j] + return j, true + } + data[i], data[j] = data[j], data[i] + i++ + j-- + + for { + for i <= j && (data[i] < data[a]) { + i++ + } + for i <= j && !(data[j] < data[a]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + data[j], data[a] = data[a], data[j] + return j, false +} + +// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. +func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for { + for i <= j && !(data[a] < data[i]) { + i++ + } + for i <= j && (data[a] < data[j]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + return i +} + +// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end. 
+func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool { + const ( + maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted + shortestShifting = 50 // don't shift any elements on short arrays + ) + i := a + 1 + for j := 0; j < maxSteps; j++ { + for i < b && !(data[i] < data[i-1]) { + i++ + } + + if i == b { + return true + } + + if b-a < shortestShifting { + return false + } + + data[i], data[i-1] = data[i-1], data[i] + + // Shift the smaller one to the left. + if i-a >= 2 { + for j := i - 1; j >= 1; j-- { + if !(data[j] < data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + // Shift the greater one to the right. + if b-i >= 2 { + for j := i + 1; j < b; j++ { + if !(data[j] < data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + } + return false +} + +// breakPatternsOrdered scatters some elements around in an attempt to break some patterns +// that might cause imbalanced partitions in quicksort. +func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) { + length := b - a + if length >= 8 { + random := xorshift(length) + modulus := nextPowerOfTwo(length) + + for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { + other := int(uint(random.Next()) & (modulus - 1)) + if other >= length { + other -= length + } + data[idx], data[a+other] = data[a+other], data[idx] + } + } +} + +// choosePivotOrdered chooses a pivot in data[a:b]. +// +// [0,8): chooses a static pivot. +// [8,shortestNinther): uses the simple median-of-three method. +// [shortestNinther,∞): uses the Tukey ninther method. +func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. 
+ i = medianAdjacentOrdered(data, i, &swaps) + j = medianAdjacentOrdered(data, j, &swaps) + k = medianAdjacentOrdered(data, k, &swaps) + } + // Find the median among i, j, k and stores it into j. + j = medianOrdered(data, i, j, k, &swaps) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { + if data[b] < data[a] { + *swaps++ + return b, a + } + return a, b +} + +// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int { + a, b = order2Ordered(data, a, b, swaps) + b, c = order2Ordered(data, b, c, swaps) + a, b = order2Ordered(data, a, b, swaps) + return b +} + +// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. 
+func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int { + return medianOrdered(data, a-1, a, a+1, swaps) +} + +func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableOrdered[E constraints.Ordered](data []E, n int) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortOrdered(data, a, b) + a = b + b += blockSize + } + insertionSortOrdered(data, a, n) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeOrdered(data, a, a+blockSize, b) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeOrdered(data, a, m, n) + } + blockSize *= 2 + } +} + +// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. +// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. 
+func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. + i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if data[h] < data[a] { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !(data[m] < data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !(data[p-c] < data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateOrdered(data, start, m, end) + } + if a < start && start < mid { + symMergeOrdered(data, a, start, mid) + } + if mid < end && end < b { + symMergeOrdered(data, mid, end, b) + } +} + +// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. 
+// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeOrdered(data, m-i, m, j) + i -= j + } else { + swapRangeOrdered(data, m-i, m+j-i, i) + j -= i + } + } + // i == j + swapRangeOrdered(data, m-i, m, i) +} diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go new file mode 100644 index 000000000..6c8d97b6a --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/key.go @@ -0,0 +1,206 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows +// +build windows + +// Package registry provides access to the Windows registry. +// +// Here is a simple example, opening a registry key and reading a string value from it. +// +// k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) +// if err != nil { +// log.Fatal(err) +// } +// defer k.Close() +// +// s, _, err := k.GetStringValue("SystemRoot") +// if err != nil { +// log.Fatal(err) +// } +// fmt.Printf("Windows system root is %q\n", s) +package registry + +import ( + "io" + "runtime" + "syscall" + "time" +) + +const ( + // Registry key security and access rights. + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx + // for details. + ALL_ACCESS = 0xf003f + CREATE_LINK = 0x00020 + CREATE_SUB_KEY = 0x00004 + ENUMERATE_SUB_KEYS = 0x00008 + EXECUTE = 0x20019 + NOTIFY = 0x00010 + QUERY_VALUE = 0x00001 + READ = 0x20019 + SET_VALUE = 0x00002 + WOW64_32KEY = 0x00200 + WOW64_64KEY = 0x00100 + WRITE = 0x20006 +) + +// Key is a handle to an open Windows registry key. 
+// Keys can be obtained by calling OpenKey; there are +// also some predefined root keys such as CURRENT_USER. +// Keys can be used directly in the Windows API. +type Key syscall.Handle + +const ( + // Windows defines some predefined root keys that are always open. + // An application can use these keys as entry points to the registry. + // Normally these keys are used in OpenKey to open new keys, + // but they can also be used anywhere a Key is required. + CLASSES_ROOT = Key(syscall.HKEY_CLASSES_ROOT) + CURRENT_USER = Key(syscall.HKEY_CURRENT_USER) + LOCAL_MACHINE = Key(syscall.HKEY_LOCAL_MACHINE) + USERS = Key(syscall.HKEY_USERS) + CURRENT_CONFIG = Key(syscall.HKEY_CURRENT_CONFIG) + PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA) +) + +// Close closes open key k. +func (k Key) Close() error { + return syscall.RegCloseKey(syscall.Handle(k)) +} + +// OpenKey opens a new key with path name relative to key k. +// It accepts any open key, including CURRENT_USER and others, +// and returns the new key and an error. +// The access parameter specifies desired access rights to the +// key to be opened. +func OpenKey(k Key, path string, access uint32) (Key, error) { + p, err := syscall.UTF16PtrFromString(path) + if err != nil { + return 0, err + } + var subkey syscall.Handle + err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey) + if err != nil { + return 0, err + } + return Key(subkey), nil +} + +// OpenRemoteKey opens a predefined registry key on another +// computer pcname. The key to be opened is specified by k, but +// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS. +// If pcname is "", OpenRemoteKey returns local computer key. 
+func OpenRemoteKey(pcname string, k Key) (Key, error) { + var err error + var p *uint16 + if pcname != "" { + p, err = syscall.UTF16PtrFromString(`\\` + pcname) + if err != nil { + return 0, err + } + } + var remoteKey syscall.Handle + err = regConnectRegistry(p, syscall.Handle(k), &remoteKey) + if err != nil { + return 0, err + } + return Key(remoteKey), nil +} + +// ReadSubKeyNames returns the names of subkeys of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadSubKeyNames(n int) ([]string, error) { + // RegEnumKeyEx must be called repeatedly and to completion. + // During this time, this goroutine cannot migrate away from + // its current thread. See https://golang.org/issue/49320 and + // https://golang.org/issue/49466. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + names := make([]string, 0) + // Registry key size limit is 255 bytes and described there: + // https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx + buf := make([]uint16, 256) //plus extra room for terminating zero byte +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. + l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} + +// CreateKey creates a key named path under open key k. +// CreateKey returns the new key and a boolean flag that reports +// whether the key already existed. +// The access parameter specifies the access rights for the key +// to be created. 
+func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) { + var h syscall.Handle + var d uint32 + err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path), + 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d) + if err != nil { + return 0, false, err + } + return Key(h), d == _REG_OPENED_EXISTING_KEY, nil +} + +// DeleteKey deletes the subkey path of key k and its values. +func DeleteKey(k Key, path string) error { + return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path)) +} + +// A KeyInfo describes the statistics of a key. It is returned by Stat. +type KeyInfo struct { + SubKeyCount uint32 + MaxSubKeyLen uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte + ValueCount uint32 + MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte + MaxValueLen uint32 // longest data component among the key's values, in bytes + lastWriteTime syscall.Filetime +} + +// ModTime returns the key's last write time. +func (ki *KeyInfo) ModTime() time.Time { + return time.Unix(0, ki.lastWriteTime.Nanoseconds()) +} + +// Stat retrieves information about the open key k. +func (k Key) Stat() (*KeyInfo, error) { + var ki KeyInfo + err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil, + &ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount, + &ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime) + if err != nil { + return nil, err + } + return &ki, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go new file mode 100644 index 000000000..ee74927d3 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/mksyscall.go @@ -0,0 +1,10 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build generate +// +build generate + +package registry + +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go syscall.go diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go new file mode 100644 index 000000000..417335123 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/syscall.go @@ -0,0 +1,33 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows +// +build windows + +package registry + +import "syscall" + +const ( + _REG_OPTION_NON_VOLATILE = 0 + + _REG_CREATED_NEW_KEY = 1 + _REG_OPENED_EXISTING_KEY = 2 + + _ERROR_NO_MORE_ITEMS syscall.Errno = 259 +) + +func LoadRegLoadMUIString() error { + return procRegLoadMUIStringW.Find() +} + +//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW +//sys regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW +//sys regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW +//sys regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW +//sys regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW +//sys regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW +//sys 
regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW + +//sys expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go new file mode 100644 index 000000000..2789f6f18 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/value.go @@ -0,0 +1,387 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows +// +build windows + +package registry + +import ( + "errors" + "io" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + // Registry value types. + NONE = 0 + SZ = 1 + EXPAND_SZ = 2 + BINARY = 3 + DWORD = 4 + DWORD_BIG_ENDIAN = 5 + LINK = 6 + MULTI_SZ = 7 + RESOURCE_LIST = 8 + FULL_RESOURCE_DESCRIPTOR = 9 + RESOURCE_REQUIREMENTS_LIST = 10 + QWORD = 11 +) + +var ( + // ErrShortBuffer is returned when the buffer was too short for the operation. + ErrShortBuffer = syscall.ERROR_MORE_DATA + + // ErrNotExist is returned when a registry key or value does not exist. + ErrNotExist = syscall.ERROR_FILE_NOT_FOUND + + // ErrUnexpectedType is returned by Get*Value when the value's type was unexpected. + ErrUnexpectedType = errors.New("unexpected key value type") +) + +// GetValue retrieves the type and data for the specified value associated +// with an open key k. It fills up buffer buf and returns the retrieved +// byte count n. If buf is too small to fit the stored value it returns +// ErrShortBuffer error along with the required buffer size n. +// If no buffer is provided, it returns true and actual buffer size n. +// If no buffer is provided, GetValue returns the value's type only. +// If the value does not exist, the error returned is ErrNotExist. 
+// +// GetValue is a low level function. If value's type is known, use the appropriate +// Get*Value function instead. +func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return 0, 0, err + } + var pbuf *byte + if len(buf) > 0 { + pbuf = (*byte)(unsafe.Pointer(&buf[0])) + } + l := uint32(len(buf)) + err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l) + if err != nil { + return int(l), valtype, err + } + return int(l), valtype, nil +} + +func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return nil, 0, err + } + var t uint32 + n := uint32(len(buf)) + for { + err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n) + if err == nil { + return buf[:n], t, nil + } + if err != syscall.ERROR_MORE_DATA { + return nil, 0, err + } + if n <= uint32(len(buf)) { + return nil, 0, err + } + buf = make([]byte, n) + } +} + +// GetStringValue retrieves the string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringValue returns ErrNotExist. +// If value is not SZ or EXPAND_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return "", typ, err2 + } + switch typ { + case SZ, EXPAND_SZ: + default: + return "", typ, ErrUnexpectedType + } + if len(data) == 0 { + return "", typ, nil + } + u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + return syscall.UTF16ToString(u), typ, nil +} + +// GetMUIStringValue retrieves the localized string value for +// the specified value name associated with an open key k. 
+// If the value name doesn't exist or the localized string value +// can't be resolved, GetMUIStringValue returns ErrNotExist. +// GetMUIStringValue panics if the system doesn't support +// regLoadMUIString; use LoadRegLoadMUIString to check if +// regLoadMUIString is supported before calling this function. +func (k Key) GetMUIStringValue(name string) (string, error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return "", err + } + + buf := make([]uint16, 1024) + var buflen uint32 + var pdir *uint16 + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path + + // Try to resolve the string value using the system directory as + // a DLL search path; this assumes the string value is of the form + // @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320. + + // This approach works with tzres.dll but may have to be revised + // in the future to allow callers to provide custom search paths. + + var s string + s, err = ExpandString("%SystemRoot%\\system32\\") + if err != nil { + return "", err + } + pdir, err = syscall.UTF16PtrFromString(s) + if err != nil { + return "", err + } + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed + if buflen <= uint32(len(buf)) { + break // Buffer not growing, assume race; break + } + buf = make([]uint16, buflen) + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + if err != nil { + return "", err + } + + return syscall.UTF16ToString(buf), nil +} + +// ExpandString expands environment-variable strings and replaces +// them with the values defined for the current user. +// Use ExpandString to expand EXPAND_SZ strings. 
+func ExpandString(value string) (string, error) { + if value == "" { + return "", nil + } + p, err := syscall.UTF16PtrFromString(value) + if err != nil { + return "", err + } + r := make([]uint16, 100) + for { + n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r))) + if err != nil { + return "", err + } + if n <= uint32(len(r)) { + return syscall.UTF16ToString(r[:n]), nil + } + r = make([]uint16, n) + } +} + +// GetStringsValue retrieves the []string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringsValue returns ErrNotExist. +// If value is not MULTI_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != MULTI_SZ { + return nil, typ, ErrUnexpectedType + } + if len(data) == 0 { + return nil, typ, nil + } + p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + if len(p) == 0 { + return nil, typ, nil + } + if p[len(p)-1] == 0 { + p = p[:len(p)-1] // remove terminating null + } + val = make([]string, 0, 5) + from := 0 + for i, c := range p { + if c == 0 { + val = append(val, string(utf16.Decode(p[from:i]))) + from = i + 1 + } + } + return val, typ, nil +} + +// GetIntegerValue retrieves the integer value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetIntegerValue returns ErrNotExist. +// If value is not DWORD or QWORD, it will return the correct value +// type and ErrUnexpectedType. 
+func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 8)) + if err2 != nil { + return 0, typ, err2 + } + switch typ { + case DWORD: + if len(data) != 4 { + return 0, typ, errors.New("DWORD value is not 4 bytes long") + } + var val32 uint32 + copy((*[4]byte)(unsafe.Pointer(&val32))[:], data) + return uint64(val32), DWORD, nil + case QWORD: + if len(data) != 8 { + return 0, typ, errors.New("QWORD value is not 8 bytes long") + } + copy((*[8]byte)(unsafe.Pointer(&val))[:], data) + return val, QWORD, nil + default: + return 0, typ, ErrUnexpectedType + } +} + +// GetBinaryValue retrieves the binary value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetBinaryValue returns ErrNotExist. +// If value is not BINARY, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != BINARY { + return nil, typ, ErrUnexpectedType + } + return data, typ, nil +} + +func (k Key) setValue(name string, valtype uint32, data []byte) error { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return err + } + if len(data) == 0 { + return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0) + } + return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data))) +} + +// SetDWordValue sets the data and type of a name value +// under key k to value and DWORD. +func (k Key) SetDWordValue(name string, value uint32) error { + return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:]) +} + +// SetQWordValue sets the data and type of a name value +// under key k to value and QWORD. 
+func (k Key) SetQWordValue(name string, value uint64) error { + return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:]) +} + +func (k Key) setStringValue(name string, valtype uint32, value string) error { + v, err := syscall.UTF16FromString(value) + if err != nil { + return err + } + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, valtype, buf) +} + +// SetStringValue sets the data and type of a name value +// under key k to value and SZ. The value must not contain a zero byte. +func (k Key) SetStringValue(name, value string) error { + return k.setStringValue(name, SZ, value) +} + +// SetExpandStringValue sets the data and type of a name value +// under key k to value and EXPAND_SZ. The value must not contain a zero byte. +func (k Key) SetExpandStringValue(name, value string) error { + return k.setStringValue(name, EXPAND_SZ, value) +} + +// SetStringsValue sets the data and type of a name value +// under key k to value and MULTI_SZ. The value strings +// must not contain a zero byte. +func (k Key) SetStringsValue(name string, value []string) error { + ss := "" + for _, s := range value { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return errors.New("string cannot have 0 inside") + } + } + ss += s + "\x00" + } + v := utf16.Encode([]rune(ss + "\x00")) + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, MULTI_SZ, buf) +} + +// SetBinaryValue sets the data and type of a name value +// under key k to value and BINARY. +func (k Key) SetBinaryValue(name string, value []byte) error { + return k.setValue(name, BINARY, value) +} + +// DeleteValue removes a named value from the key k. +func (k Key) DeleteValue(name string) error { + return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name)) +} + +// ReadValueNames returns the value names of key k. 
+// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadValueNames(n int) ([]string, error) { + ki, err := k.Stat() + if err != nil { + return nil, err + } + names := make([]string, 0, ki.ValueCount) + buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. + l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go new file mode 100644 index 000000000..fc1835d8a --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go @@ -0,0 +1,117 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package registry + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. 
(perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procRegConnectRegistryW = modadvapi32.NewProc("RegConnectRegistryW") + procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW") + procRegDeleteKeyW = modadvapi32.NewProc("RegDeleteKeyW") + procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW") + procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW") + procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW") + procRegSetValueExW = modadvapi32.NewProc("RegSetValueExW") + procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") +) + +func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, 
uintptr(key), uintptr(unsafe.Pointer(name)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/windows/svc/mgr/config.go b/vendor/golang.org/x/sys/windows/svc/mgr/config.go new file mode 100644 index 000000000..04554862c --- /dev/null +++ 
b/vendor/golang.org/x/sys/windows/svc/mgr/config.go @@ -0,0 +1,181 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows +// +build windows + +package mgr + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +const ( + // Service start types. + StartManual = windows.SERVICE_DEMAND_START // the service must be started manually + StartAutomatic = windows.SERVICE_AUTO_START // the service will start by itself whenever the computer reboots + StartDisabled = windows.SERVICE_DISABLED // the service cannot be started + + // The severity of the error, and action taken, + // if this service fails to start. + ErrorCritical = windows.SERVICE_ERROR_CRITICAL + ErrorIgnore = windows.SERVICE_ERROR_IGNORE + ErrorNormal = windows.SERVICE_ERROR_NORMAL + ErrorSevere = windows.SERVICE_ERROR_SEVERE +) + +// TODO(brainman): Password is not returned by windows.QueryServiceConfig, not sure how to get it. 
+ +type Config struct { + ServiceType uint32 + StartType uint32 + ErrorControl uint32 + BinaryPathName string // fully qualified path to the service binary file, can also include arguments for an auto-start service + LoadOrderGroup string + TagId uint32 + Dependencies []string + ServiceStartName string // name of the account under which the service should run + DisplayName string + Password string + Description string + SidType uint32 // one of SERVICE_SID_TYPE, the type of sid to use for the service + DelayedAutoStart bool // the service is started after other auto-start services are started plus a short delay +} + +func toStringSlice(ps *uint16) []string { + r := make([]string, 0) + p := unsafe.Pointer(ps) + + for { + s := windows.UTF16PtrToString((*uint16)(p)) + if len(s) == 0 { + break + } + + r = append(r, s) + offset := unsafe.Sizeof(uint16(0)) * (uintptr)(len(s)+1) + p = unsafe.Pointer(uintptr(p) + offset) + } + + return r +} + +// Config retrieves service s configuration paramteres. 
+func (s *Service) Config() (Config, error) { + var p *windows.QUERY_SERVICE_CONFIG + n := uint32(1024) + for { + b := make([]byte, n) + p = (*windows.QUERY_SERVICE_CONFIG)(unsafe.Pointer(&b[0])) + err := windows.QueryServiceConfig(s.Handle, p, n, &n) + if err == nil { + break + } + if err.(syscall.Errno) != syscall.ERROR_INSUFFICIENT_BUFFER { + return Config{}, err + } + if n <= uint32(len(b)) { + return Config{}, err + } + } + + b, err := s.queryServiceConfig2(windows.SERVICE_CONFIG_DESCRIPTION) + if err != nil { + return Config{}, err + } + p2 := (*windows.SERVICE_DESCRIPTION)(unsafe.Pointer(&b[0])) + + b, err = s.queryServiceConfig2(windows.SERVICE_CONFIG_DELAYED_AUTO_START_INFO) + if err != nil { + return Config{}, err + } + p3 := (*windows.SERVICE_DELAYED_AUTO_START_INFO)(unsafe.Pointer(&b[0])) + delayedStart := false + if p3.IsDelayedAutoStartUp != 0 { + delayedStart = true + } + + b, err = s.queryServiceConfig2(windows.SERVICE_CONFIG_SERVICE_SID_INFO) + if err != nil { + return Config{}, err + } + sidType := *(*uint32)(unsafe.Pointer(&b[0])) + + return Config{ + ServiceType: p.ServiceType, + StartType: p.StartType, + ErrorControl: p.ErrorControl, + BinaryPathName: windows.UTF16PtrToString(p.BinaryPathName), + LoadOrderGroup: windows.UTF16PtrToString(p.LoadOrderGroup), + TagId: p.TagId, + Dependencies: toStringSlice(p.Dependencies), + ServiceStartName: windows.UTF16PtrToString(p.ServiceStartName), + DisplayName: windows.UTF16PtrToString(p.DisplayName), + Description: windows.UTF16PtrToString(p2.Description), + DelayedAutoStart: delayedStart, + SidType: sidType, + }, nil +} + +func updateDescription(handle windows.Handle, desc string) error { + d := windows.SERVICE_DESCRIPTION{Description: toPtr(desc)} + return windows.ChangeServiceConfig2(handle, + windows.SERVICE_CONFIG_DESCRIPTION, (*byte)(unsafe.Pointer(&d))) +} + +func updateSidType(handle windows.Handle, sidType uint32) error { + return windows.ChangeServiceConfig2(handle, 
windows.SERVICE_CONFIG_SERVICE_SID_INFO, (*byte)(unsafe.Pointer(&sidType))) +} + +func updateStartUp(handle windows.Handle, isDelayed bool) error { + var d windows.SERVICE_DELAYED_AUTO_START_INFO + if isDelayed { + d.IsDelayedAutoStartUp = 1 + } + return windows.ChangeServiceConfig2(handle, + windows.SERVICE_CONFIG_DELAYED_AUTO_START_INFO, (*byte)(unsafe.Pointer(&d))) +} + +// UpdateConfig updates service s configuration parameters. +func (s *Service) UpdateConfig(c Config) error { + err := windows.ChangeServiceConfig(s.Handle, c.ServiceType, c.StartType, + c.ErrorControl, toPtr(c.BinaryPathName), toPtr(c.LoadOrderGroup), + nil, toStringBlock(c.Dependencies), toPtr(c.ServiceStartName), + toPtr(c.Password), toPtr(c.DisplayName)) + if err != nil { + return err + } + err = updateSidType(s.Handle, c.SidType) + if err != nil { + return err + } + + err = updateStartUp(s.Handle, c.DelayedAutoStart) + if err != nil { + return err + } + + return updateDescription(s.Handle, c.Description) +} + +// queryServiceConfig2 calls Windows QueryServiceConfig2 with infoLevel parameter and returns retrieved service configuration information. +func (s *Service) queryServiceConfig2(infoLevel uint32) ([]byte, error) { + n := uint32(1024) + for { + b := make([]byte, n) + err := windows.QueryServiceConfig2(s.Handle, infoLevel, &b[0], n, &n) + if err == nil { + return b, nil + } + if err.(syscall.Errno) != syscall.ERROR_INSUFFICIENT_BUFFER { + return nil, err + } + if n <= uint32(len(b)) { + return nil, err + } + } +} diff --git a/vendor/golang.org/x/sys/windows/svc/mgr/mgr.go b/vendor/golang.org/x/sys/windows/svc/mgr/mgr.go new file mode 100644 index 000000000..c2dc8701d --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/mgr/mgr.go @@ -0,0 +1,215 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build windows +// +build windows + +// Package mgr can be used to manage Windows service programs. +// It can be used to install and remove them. It can also start, +// stop and pause them. The package can query / change current +// service state and config parameters. +package mgr + +import ( + "syscall" + "time" + "unicode/utf16" + "unsafe" + + "golang.org/x/sys/internal/unsafeheader" + "golang.org/x/sys/windows" +) + +// Mgr is used to manage Windows service. +type Mgr struct { + Handle windows.Handle +} + +// Connect establishes a connection to the service control manager. +func Connect() (*Mgr, error) { + return ConnectRemote("") +} + +// ConnectRemote establishes a connection to the +// service control manager on computer named host. +func ConnectRemote(host string) (*Mgr, error) { + var s *uint16 + if host != "" { + s = syscall.StringToUTF16Ptr(host) + } + h, err := windows.OpenSCManager(s, nil, windows.SC_MANAGER_ALL_ACCESS) + if err != nil { + return nil, err + } + return &Mgr{Handle: h}, nil +} + +// Disconnect closes connection to the service control manager m. +func (m *Mgr) Disconnect() error { + return windows.CloseServiceHandle(m.Handle) +} + +type LockStatus struct { + IsLocked bool // Whether the SCM has been locked. + Age time.Duration // For how long the SCM has been locked. + Owner string // The name of the user who has locked the SCM. +} + +// LockStatus returns whether the service control manager is locked by +// the system, for how long, and by whom. A locked SCM indicates that +// most service actions will block until the system unlocks the SCM. 
+func (m *Mgr) LockStatus() (*LockStatus, error) { + bytesNeeded := uint32(unsafe.Sizeof(windows.QUERY_SERVICE_LOCK_STATUS{}) + 1024) + for { + bytes := make([]byte, bytesNeeded) + lockStatus := (*windows.QUERY_SERVICE_LOCK_STATUS)(unsafe.Pointer(&bytes[0])) + err := windows.QueryServiceLockStatus(m.Handle, lockStatus, uint32(len(bytes)), &bytesNeeded) + if err == windows.ERROR_INSUFFICIENT_BUFFER && bytesNeeded >= uint32(unsafe.Sizeof(windows.QUERY_SERVICE_LOCK_STATUS{})) { + continue + } + if err != nil { + return nil, err + } + status := &LockStatus{ + IsLocked: lockStatus.IsLocked != 0, + Age: time.Duration(lockStatus.LockDuration) * time.Second, + Owner: windows.UTF16PtrToString(lockStatus.LockOwner), + } + return status, nil + } +} + +func toPtr(s string) *uint16 { + if len(s) == 0 { + return nil + } + return syscall.StringToUTF16Ptr(s) +} + +// toStringBlock terminates strings in ss with 0, and then +// concatenates them together. It also adds extra 0 at the end. +func toStringBlock(ss []string) *uint16 { + if len(ss) == 0 { + return nil + } + t := "" + for _, s := range ss { + if s != "" { + t += s + "\x00" + } + } + if t == "" { + return nil + } + t += "\x00" + return &utf16.Encode([]rune(t))[0] +} + +// CreateService installs new service name on the system. +// The service will be executed by running exepath binary. +// Use config c to specify service parameters. +// Any args will be passed as command-line arguments when +// the service is started; these arguments are distinct from +// the arguments passed to Service.Start or via the "Start +// parameters" field in the service's Properties dialog box. 
+func (m *Mgr) CreateService(name, exepath string, c Config, args ...string) (*Service, error) { + if c.StartType == 0 { + c.StartType = StartManual + } + if c.ServiceType == 0 { + c.ServiceType = windows.SERVICE_WIN32_OWN_PROCESS + } + s := syscall.EscapeArg(exepath) + for _, v := range args { + s += " " + syscall.EscapeArg(v) + } + h, err := windows.CreateService(m.Handle, toPtr(name), toPtr(c.DisplayName), + windows.SERVICE_ALL_ACCESS, c.ServiceType, + c.StartType, c.ErrorControl, toPtr(s), toPtr(c.LoadOrderGroup), + nil, toStringBlock(c.Dependencies), toPtr(c.ServiceStartName), toPtr(c.Password)) + if err != nil { + return nil, err + } + if c.SidType != windows.SERVICE_SID_TYPE_NONE { + err = updateSidType(h, c.SidType) + if err != nil { + windows.DeleteService(h) + windows.CloseServiceHandle(h) + return nil, err + } + } + if c.Description != "" { + err = updateDescription(h, c.Description) + if err != nil { + windows.DeleteService(h) + windows.CloseServiceHandle(h) + return nil, err + } + } + if c.DelayedAutoStart { + err = updateStartUp(h, c.DelayedAutoStart) + if err != nil { + windows.DeleteService(h) + windows.CloseServiceHandle(h) + return nil, err + } + } + return &Service{Name: name, Handle: h}, nil +} + +// OpenService retrieves access to service name, so it can +// be interrogated and controlled. +func (m *Mgr) OpenService(name string) (*Service, error) { + h, err := windows.OpenService(m.Handle, syscall.StringToUTF16Ptr(name), windows.SERVICE_ALL_ACCESS) + if err != nil { + return nil, err + } + return &Service{Name: name, Handle: h}, nil +} + +// ListServices enumerates services in the specified +// service control manager database m. +// If the caller does not have the SERVICE_QUERY_STATUS +// access right to a service, the service is silently +// omitted from the list of services returned. 
+func (m *Mgr) ListServices() ([]string, error) { + var err error + var bytesNeeded, servicesReturned uint32 + var buf []byte + for { + var p *byte + if len(buf) > 0 { + p = &buf[0] + } + err = windows.EnumServicesStatusEx(m.Handle, windows.SC_ENUM_PROCESS_INFO, + windows.SERVICE_WIN32, windows.SERVICE_STATE_ALL, + p, uint32(len(buf)), &bytesNeeded, &servicesReturned, nil, nil) + if err == nil { + break + } + if err != syscall.ERROR_MORE_DATA { + return nil, err + } + if bytesNeeded <= uint32(len(buf)) { + return nil, err + } + buf = make([]byte, bytesNeeded) + } + if servicesReturned == 0 { + return nil, nil + } + + var services []windows.ENUM_SERVICE_STATUS_PROCESS + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&services)) + hdr.Data = unsafe.Pointer(&buf[0]) + hdr.Len = int(servicesReturned) + hdr.Cap = int(servicesReturned) + + var names []string + for _, s := range services { + name := windows.UTF16PtrToString(s.ServiceName) + names = append(names, name) + } + return names, nil +} diff --git a/vendor/golang.org/x/sys/windows/svc/mgr/recovery.go b/vendor/golang.org/x/sys/windows/svc/mgr/recovery.go new file mode 100644 index 000000000..321451990 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/mgr/recovery.go @@ -0,0 +1,169 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows +// +build windows + +package mgr + +import ( + "errors" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/internal/unsafeheader" + "golang.org/x/sys/windows" +) + +const ( + // Possible recovery actions that the service control manager can perform. 
+ NoAction = windows.SC_ACTION_NONE // no action + ComputerReboot = windows.SC_ACTION_REBOOT // reboot the computer + ServiceRestart = windows.SC_ACTION_RESTART // restart the service + RunCommand = windows.SC_ACTION_RUN_COMMAND // run a command +) + +// RecoveryAction represents an action that the service control manager can perform when service fails. +// A service is considered failed when it terminates without reporting a status of SERVICE_STOPPED to the service controller. +type RecoveryAction struct { + Type int // one of NoAction, ComputerReboot, ServiceRestart or RunCommand + Delay time.Duration // the time to wait before performing the specified action +} + +// SetRecoveryActions sets actions that service controller performs when service fails and +// the time after which to reset the service failure count to zero if there are no failures, in seconds. +// Specify INFINITE to indicate that service failure count should never be reset. +func (s *Service) SetRecoveryActions(recoveryActions []RecoveryAction, resetPeriod uint32) error { + if recoveryActions == nil { + return errors.New("recoveryActions cannot be nil") + } + actions := []windows.SC_ACTION{} + for _, a := range recoveryActions { + action := windows.SC_ACTION{ + Type: uint32(a.Type), + Delay: uint32(a.Delay.Nanoseconds() / 1000000), + } + actions = append(actions, action) + } + rActions := windows.SERVICE_FAILURE_ACTIONS{ + ActionsCount: uint32(len(actions)), + Actions: &actions[0], + ResetPeriod: resetPeriod, + } + return windows.ChangeServiceConfig2(s.Handle, windows.SERVICE_CONFIG_FAILURE_ACTIONS, (*byte)(unsafe.Pointer(&rActions))) +} + +// RecoveryActions returns actions that service controller performs when service fails. +// The service control manager counts the number of times service s has failed since the system booted. +// The count is reset to 0 if the service has not failed for ResetPeriod seconds. 
+// When the service fails for the Nth time, the service controller performs the action specified in element [N-1] of returned slice. +// If N is greater than slice length, the service controller repeats the last action in the slice. +func (s *Service) RecoveryActions() ([]RecoveryAction, error) { + b, err := s.queryServiceConfig2(windows.SERVICE_CONFIG_FAILURE_ACTIONS) + if err != nil { + return nil, err + } + p := (*windows.SERVICE_FAILURE_ACTIONS)(unsafe.Pointer(&b[0])) + if p.Actions == nil { + return nil, err + } + + var actions []windows.SC_ACTION + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&actions)) + hdr.Data = unsafe.Pointer(p.Actions) + hdr.Len = int(p.ActionsCount) + hdr.Cap = int(p.ActionsCount) + + var recoveryActions []RecoveryAction + for _, action := range actions { + recoveryActions = append(recoveryActions, RecoveryAction{Type: int(action.Type), Delay: time.Duration(action.Delay) * time.Millisecond}) + } + return recoveryActions, nil +} + +// ResetRecoveryActions deletes both reset period and array of failure actions. +func (s *Service) ResetRecoveryActions() error { + actions := make([]windows.SC_ACTION, 1) + rActions := windows.SERVICE_FAILURE_ACTIONS{ + Actions: &actions[0], + } + return windows.ChangeServiceConfig2(s.Handle, windows.SERVICE_CONFIG_FAILURE_ACTIONS, (*byte)(unsafe.Pointer(&rActions))) +} + +// ResetPeriod is the time after which to reset the service failure +// count to zero if there are no failures, in seconds. +func (s *Service) ResetPeriod() (uint32, error) { + b, err := s.queryServiceConfig2(windows.SERVICE_CONFIG_FAILURE_ACTIONS) + if err != nil { + return 0, err + } + p := (*windows.SERVICE_FAILURE_ACTIONS)(unsafe.Pointer(&b[0])) + return p.ResetPeriod, nil +} + +// SetRebootMessage sets service s reboot message. +// If msg is "", the reboot message is deleted and no message is broadcast. 
+func (s *Service) SetRebootMessage(msg string) error { + rActions := windows.SERVICE_FAILURE_ACTIONS{ + RebootMsg: syscall.StringToUTF16Ptr(msg), + } + return windows.ChangeServiceConfig2(s.Handle, windows.SERVICE_CONFIG_FAILURE_ACTIONS, (*byte)(unsafe.Pointer(&rActions))) +} + +// RebootMessage is broadcast to server users before rebooting in response to the ComputerReboot service controller action. +func (s *Service) RebootMessage() (string, error) { + b, err := s.queryServiceConfig2(windows.SERVICE_CONFIG_FAILURE_ACTIONS) + if err != nil { + return "", err + } + p := (*windows.SERVICE_FAILURE_ACTIONS)(unsafe.Pointer(&b[0])) + return windows.UTF16PtrToString(p.RebootMsg), nil +} + +// SetRecoveryCommand sets the command line of the process to execute in response to the RunCommand service controller action. +// If cmd is "", the command is deleted and no program is run when the service fails. +func (s *Service) SetRecoveryCommand(cmd string) error { + rActions := windows.SERVICE_FAILURE_ACTIONS{ + Command: syscall.StringToUTF16Ptr(cmd), + } + return windows.ChangeServiceConfig2(s.Handle, windows.SERVICE_CONFIG_FAILURE_ACTIONS, (*byte)(unsafe.Pointer(&rActions))) +} + +// RecoveryCommand is the command line of the process to execute in response to the RunCommand service controller action. This process runs under the same account as the service. +func (s *Service) RecoveryCommand() (string, error) { + b, err := s.queryServiceConfig2(windows.SERVICE_CONFIG_FAILURE_ACTIONS) + if err != nil { + return "", err + } + p := (*windows.SERVICE_FAILURE_ACTIONS)(unsafe.Pointer(&b[0])) + return windows.UTF16PtrToString(p.Command), nil +} + +// SetRecoveryActionsOnNonCrashFailures sets the failure actions flag. If the +// flag is set to false, recovery actions will only be performed if the service +// terminates without reporting a status of SERVICE_STOPPED. If the flag is set +// to true, recovery actions are also perfomed if the service stops with a +// nonzero exit code. 
+func (s *Service) SetRecoveryActionsOnNonCrashFailures(flag bool) error { + var setting windows.SERVICE_FAILURE_ACTIONS_FLAG + if flag { + setting.FailureActionsOnNonCrashFailures = 1 + } + return windows.ChangeServiceConfig2(s.Handle, windows.SERVICE_CONFIG_FAILURE_ACTIONS_FLAG, (*byte)(unsafe.Pointer(&setting))) +} + +// RecoveryActionsOnNonCrashFailures returns the current value of the failure +// actions flag. If the flag is set to false, recovery actions will only be +// performed if the service terminates without reporting a status of +// SERVICE_STOPPED. If the flag is set to true, recovery actions are also +// perfomed if the service stops with a nonzero exit code. +func (s *Service) RecoveryActionsOnNonCrashFailures() (bool, error) { + b, err := s.queryServiceConfig2(windows.SERVICE_CONFIG_FAILURE_ACTIONS_FLAG) + if err != nil { + return false, err + } + p := (*windows.SERVICE_FAILURE_ACTIONS_FLAG)(unsafe.Pointer(&b[0])) + return p.FailureActionsOnNonCrashFailures != 0, nil +} diff --git a/vendor/golang.org/x/sys/windows/svc/mgr/service.go b/vendor/golang.org/x/sys/windows/svc/mgr/service.go new file mode 100644 index 000000000..be3d151a3 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/mgr/service.go @@ -0,0 +1,125 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows +// +build windows + +package mgr + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" + "golang.org/x/sys/windows/svc" +) + +// Service is used to access Windows service. +type Service struct { + Name string + Handle windows.Handle +} + +// Delete marks service s for deletion from the service control manager database. +func (s *Service) Delete() error { + return windows.DeleteService(s.Handle) +} + +// Close relinquish access to the service s. 
+func (s *Service) Close() error { + return windows.CloseServiceHandle(s.Handle) +} + +// Start starts service s. +// args will be passed to svc.Handler.Execute. +func (s *Service) Start(args ...string) error { + var p **uint16 + if len(args) > 0 { + vs := make([]*uint16, len(args)) + for i := range vs { + vs[i] = syscall.StringToUTF16Ptr(args[i]) + } + p = &vs[0] + } + return windows.StartService(s.Handle, uint32(len(args)), p) +} + +// Control sends state change request c to the service s. It returns the most +// recent status the service reported to the service control manager, and an +// error if the state change request was not accepted. +// Note that the returned service status is only set if the status change +// request succeeded, or if it failed with error ERROR_INVALID_SERVICE_CONTROL, +// ERROR_SERVICE_CANNOT_ACCEPT_CTRL, or ERROR_SERVICE_NOT_ACTIVE. +func (s *Service) Control(c svc.Cmd) (svc.Status, error) { + var t windows.SERVICE_STATUS + err := windows.ControlService(s.Handle, uint32(c), &t) + if err != nil && + err != windows.ERROR_INVALID_SERVICE_CONTROL && + err != windows.ERROR_SERVICE_CANNOT_ACCEPT_CTRL && + err != windows.ERROR_SERVICE_NOT_ACTIVE { + return svc.Status{}, err + } + return svc.Status{ + State: svc.State(t.CurrentState), + Accepts: svc.Accepted(t.ControlsAccepted), + }, err +} + +// Query returns current status of service s. 
+func (s *Service) Query() (svc.Status, error) { + var t windows.SERVICE_STATUS_PROCESS + var needed uint32 + err := windows.QueryServiceStatusEx(s.Handle, windows.SC_STATUS_PROCESS_INFO, (*byte)(unsafe.Pointer(&t)), uint32(unsafe.Sizeof(t)), &needed) + if err != nil { + return svc.Status{}, err + } + return svc.Status{ + State: svc.State(t.CurrentState), + Accepts: svc.Accepted(t.ControlsAccepted), + ProcessId: t.ProcessId, + Win32ExitCode: t.Win32ExitCode, + ServiceSpecificExitCode: t.ServiceSpecificExitCode, + }, nil +} + +// ListDependentServices returns the names of the services dependent on service s, which match the given status. +func (s *Service) ListDependentServices(status svc.ActivityStatus) ([]string, error) { + var bytesNeeded, returnedServiceCount uint32 + var services []windows.ENUM_SERVICE_STATUS + for { + var servicesPtr *windows.ENUM_SERVICE_STATUS + if len(services) > 0 { + servicesPtr = &services[0] + } + allocatedBytes := uint32(len(services)) * uint32(unsafe.Sizeof(windows.ENUM_SERVICE_STATUS{})) + err := windows.EnumDependentServices(s.Handle, uint32(status), servicesPtr, allocatedBytes, &bytesNeeded, + &returnedServiceCount) + if err == nil { + break + } + if err != syscall.ERROR_MORE_DATA { + return nil, err + } + if bytesNeeded <= allocatedBytes { + return nil, err + } + // ERROR_MORE_DATA indicates the provided buffer was too small, run the call again after resizing the buffer + requiredSliceLen := bytesNeeded / uint32(unsafe.Sizeof(windows.ENUM_SERVICE_STATUS{})) + if bytesNeeded%uint32(unsafe.Sizeof(windows.ENUM_SERVICE_STATUS{})) != 0 { + requiredSliceLen += 1 + } + services = make([]windows.ENUM_SERVICE_STATUS, requiredSliceLen) + } + if returnedServiceCount == 0 { + return nil, nil + } + + // The slice mutated by EnumDependentServices may have a length greater than returnedServiceCount, any elements + // past that should be ignored. 
+ var dependents []string + for i := 0; i < int(returnedServiceCount); i++ { + dependents = append(dependents, windows.UTF16PtrToString(services[i].ServiceName)) + } + return dependents, nil +} diff --git a/vendor/golang.org/x/sys/windows/svc/security.go b/vendor/golang.org/x/sys/windows/svc/security.go new file mode 100644 index 000000000..1c51006ea --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/security.go @@ -0,0 +1,101 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows +// +build windows + +package svc + +import ( + "strings" + "unsafe" + + "golang.org/x/sys/windows" +) + +func allocSid(subAuth0 uint32) (*windows.SID, error) { + var sid *windows.SID + err := windows.AllocateAndInitializeSid(&windows.SECURITY_NT_AUTHORITY, + 1, subAuth0, 0, 0, 0, 0, 0, 0, 0, &sid) + if err != nil { + return nil, err + } + return sid, nil +} + +// IsAnInteractiveSession determines if calling process is running interactively. +// It queries the process token for membership in the Interactive group. +// http://stackoverflow.com/questions/2668851/how-do-i-detect-that-my-application-is-running-as-service-or-in-an-interactive-s +// +// Deprecated: Use IsWindowsService instead. 
+func IsAnInteractiveSession() (bool, error) { + interSid, err := allocSid(windows.SECURITY_INTERACTIVE_RID) + if err != nil { + return false, err + } + defer windows.FreeSid(interSid) + + serviceSid, err := allocSid(windows.SECURITY_SERVICE_RID) + if err != nil { + return false, err + } + defer windows.FreeSid(serviceSid) + + t, err := windows.OpenCurrentProcessToken() + if err != nil { + return false, err + } + defer t.Close() + + gs, err := t.GetTokenGroups() + if err != nil { + return false, err + } + + for _, g := range gs.AllGroups() { + if windows.EqualSid(g.Sid, interSid) { + return true, nil + } + if windows.EqualSid(g.Sid, serviceSid) { + return false, nil + } + } + return false, nil +} + +// IsWindowsService reports whether the process is currently executing +// as a Windows service. +func IsWindowsService() (bool, error) { + // The below technique looks a bit hairy, but it's actually + // exactly what the .NET framework does for the similarly named function: + // https://github.com/dotnet/extensions/blob/f4066026ca06984b07e90e61a6390ac38152ba93/src/Hosting/WindowsServices/src/WindowsServiceHelpers.cs#L26-L31 + // Specifically, it looks up whether the parent process has session ID zero + // and is called "services". 
+ + var currentProcess windows.PROCESS_BASIC_INFORMATION + infoSize := uint32(unsafe.Sizeof(currentProcess)) + err := windows.NtQueryInformationProcess(windows.CurrentProcess(), windows.ProcessBasicInformation, unsafe.Pointer(¤tProcess), infoSize, &infoSize) + if err != nil { + return false, err + } + var parentProcess *windows.SYSTEM_PROCESS_INFORMATION + for infoSize = uint32((unsafe.Sizeof(*parentProcess) + unsafe.Sizeof(uintptr(0))) * 1024); ; { + parentProcess = (*windows.SYSTEM_PROCESS_INFORMATION)(unsafe.Pointer(&make([]byte, infoSize)[0])) + err = windows.NtQuerySystemInformation(windows.SystemProcessInformation, unsafe.Pointer(parentProcess), infoSize, &infoSize) + if err == nil { + break + } else if err != windows.STATUS_INFO_LENGTH_MISMATCH { + return false, err + } + } + for ; ; parentProcess = (*windows.SYSTEM_PROCESS_INFORMATION)(unsafe.Pointer(uintptr(unsafe.Pointer(parentProcess)) + uintptr(parentProcess.NextEntryOffset))) { + if parentProcess.UniqueProcessID == currentProcess.InheritedFromUniqueProcessId { + return parentProcess.SessionID == 0 && strings.EqualFold("services.exe", parentProcess.ImageName.String()), nil + } + if parentProcess.NextEntryOffset == 0 { + break + } + } + return false, nil +} diff --git a/vendor/golang.org/x/sys/windows/svc/service.go b/vendor/golang.org/x/sys/windows/svc/service.go new file mode 100644 index 000000000..2b4a7bc6c --- /dev/null +++ b/vendor/golang.org/x/sys/windows/svc/service.go @@ -0,0 +1,322 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows +// +build windows + +// Package svc provides everything required to build Windows service. +package svc + +import ( + "errors" + "sync" + "unsafe" + + "golang.org/x/sys/internal/unsafeheader" + "golang.org/x/sys/windows" +) + +// State describes service execution state (Stopped, Running and so on). 
+type State uint32 + +const ( + Stopped = State(windows.SERVICE_STOPPED) + StartPending = State(windows.SERVICE_START_PENDING) + StopPending = State(windows.SERVICE_STOP_PENDING) + Running = State(windows.SERVICE_RUNNING) + ContinuePending = State(windows.SERVICE_CONTINUE_PENDING) + PausePending = State(windows.SERVICE_PAUSE_PENDING) + Paused = State(windows.SERVICE_PAUSED) +) + +// Cmd represents service state change request. It is sent to a service +// by the service manager, and should be actioned upon by the service. +type Cmd uint32 + +const ( + Stop = Cmd(windows.SERVICE_CONTROL_STOP) + Pause = Cmd(windows.SERVICE_CONTROL_PAUSE) + Continue = Cmd(windows.SERVICE_CONTROL_CONTINUE) + Interrogate = Cmd(windows.SERVICE_CONTROL_INTERROGATE) + Shutdown = Cmd(windows.SERVICE_CONTROL_SHUTDOWN) + ParamChange = Cmd(windows.SERVICE_CONTROL_PARAMCHANGE) + NetBindAdd = Cmd(windows.SERVICE_CONTROL_NETBINDADD) + NetBindRemove = Cmd(windows.SERVICE_CONTROL_NETBINDREMOVE) + NetBindEnable = Cmd(windows.SERVICE_CONTROL_NETBINDENABLE) + NetBindDisable = Cmd(windows.SERVICE_CONTROL_NETBINDDISABLE) + DeviceEvent = Cmd(windows.SERVICE_CONTROL_DEVICEEVENT) + HardwareProfileChange = Cmd(windows.SERVICE_CONTROL_HARDWAREPROFILECHANGE) + PowerEvent = Cmd(windows.SERVICE_CONTROL_POWEREVENT) + SessionChange = Cmd(windows.SERVICE_CONTROL_SESSIONCHANGE) + PreShutdown = Cmd(windows.SERVICE_CONTROL_PRESHUTDOWN) +) + +// Accepted is used to describe commands accepted by the service. +// Note that Interrogate is always accepted. 
+type Accepted uint32 + +const ( + AcceptStop = Accepted(windows.SERVICE_ACCEPT_STOP) + AcceptShutdown = Accepted(windows.SERVICE_ACCEPT_SHUTDOWN) + AcceptPauseAndContinue = Accepted(windows.SERVICE_ACCEPT_PAUSE_CONTINUE) + AcceptParamChange = Accepted(windows.SERVICE_ACCEPT_PARAMCHANGE) + AcceptNetBindChange = Accepted(windows.SERVICE_ACCEPT_NETBINDCHANGE) + AcceptHardwareProfileChange = Accepted(windows.SERVICE_ACCEPT_HARDWAREPROFILECHANGE) + AcceptPowerEvent = Accepted(windows.SERVICE_ACCEPT_POWEREVENT) + AcceptSessionChange = Accepted(windows.SERVICE_ACCEPT_SESSIONCHANGE) + AcceptPreShutdown = Accepted(windows.SERVICE_ACCEPT_PRESHUTDOWN) +) + +// ActivityStatus allows for services to be selected based on active and inactive categories of service state. +type ActivityStatus uint32 + +const ( + Active = ActivityStatus(windows.SERVICE_ACTIVE) + Inactive = ActivityStatus(windows.SERVICE_INACTIVE) + AnyActivity = ActivityStatus(windows.SERVICE_STATE_ALL) +) + +// Status combines State and Accepted commands to fully describe running service. +type Status struct { + State State + Accepts Accepted + CheckPoint uint32 // used to report progress during a lengthy operation + WaitHint uint32 // estimated time required for a pending operation, in milliseconds + ProcessId uint32 // if the service is running, the process identifier of it, and otherwise zero + Win32ExitCode uint32 // set if the service has exited with a win32 exit code + ServiceSpecificExitCode uint32 // set if the service has exited with a service-specific exit code +} + +// StartReason is the reason that the service was started. 
+type StartReason uint32 + +const ( + StartReasonDemand = StartReason(windows.SERVICE_START_REASON_DEMAND) + StartReasonAuto = StartReason(windows.SERVICE_START_REASON_AUTO) + StartReasonTrigger = StartReason(windows.SERVICE_START_REASON_TRIGGER) + StartReasonRestartOnFailure = StartReason(windows.SERVICE_START_REASON_RESTART_ON_FAILURE) + StartReasonDelayedAuto = StartReason(windows.SERVICE_START_REASON_DELAYEDAUTO) +) + +// ChangeRequest is sent to the service Handler to request service status change. +type ChangeRequest struct { + Cmd Cmd + EventType uint32 + EventData uintptr + CurrentStatus Status + Context uintptr +} + +// Handler is the interface that must be implemented to build Windows service. +type Handler interface { + // Execute will be called by the package code at the start of + // the service, and the service will exit once Execute completes. + // Inside Execute you must read service change requests from r and + // act accordingly. You must keep service control manager up to date + // about state of your service by writing into s as required. + // args contains service name followed by argument strings passed + // to the service. + // You can provide service exit code in exitCode return parameter, + // with 0 being "no error". You can also indicate if exit code, + // if any, is service specific or not by using svcSpecificEC + // parameter. + Execute(args []string, r <-chan ChangeRequest, s chan<- Status) (svcSpecificEC bool, exitCode uint32) +} + +type ctlEvent struct { + cmd Cmd + eventType uint32 + eventData uintptr + context uintptr + errno uint32 +} + +// service provides access to windows service api. 
+type service struct { + name string + h windows.Handle + c chan ctlEvent + handler Handler +} + +type exitCode struct { + isSvcSpecific bool + errno uint32 +} + +func (s *service) updateStatus(status *Status, ec *exitCode) error { + if s.h == 0 { + return errors.New("updateStatus with no service status handle") + } + var t windows.SERVICE_STATUS + t.ServiceType = windows.SERVICE_WIN32_OWN_PROCESS + t.CurrentState = uint32(status.State) + if status.Accepts&AcceptStop != 0 { + t.ControlsAccepted |= windows.SERVICE_ACCEPT_STOP + } + if status.Accepts&AcceptShutdown != 0 { + t.ControlsAccepted |= windows.SERVICE_ACCEPT_SHUTDOWN + } + if status.Accepts&AcceptPauseAndContinue != 0 { + t.ControlsAccepted |= windows.SERVICE_ACCEPT_PAUSE_CONTINUE + } + if status.Accepts&AcceptParamChange != 0 { + t.ControlsAccepted |= windows.SERVICE_ACCEPT_PARAMCHANGE + } + if status.Accepts&AcceptNetBindChange != 0 { + t.ControlsAccepted |= windows.SERVICE_ACCEPT_NETBINDCHANGE + } + if status.Accepts&AcceptHardwareProfileChange != 0 { + t.ControlsAccepted |= windows.SERVICE_ACCEPT_HARDWAREPROFILECHANGE + } + if status.Accepts&AcceptPowerEvent != 0 { + t.ControlsAccepted |= windows.SERVICE_ACCEPT_POWEREVENT + } + if status.Accepts&AcceptSessionChange != 0 { + t.ControlsAccepted |= windows.SERVICE_ACCEPT_SESSIONCHANGE + } + if status.Accepts&AcceptPreShutdown != 0 { + t.ControlsAccepted |= windows.SERVICE_ACCEPT_PRESHUTDOWN + } + if ec.errno == 0 { + t.Win32ExitCode = windows.NO_ERROR + t.ServiceSpecificExitCode = windows.NO_ERROR + } else if ec.isSvcSpecific { + t.Win32ExitCode = uint32(windows.ERROR_SERVICE_SPECIFIC_ERROR) + t.ServiceSpecificExitCode = ec.errno + } else { + t.Win32ExitCode = ec.errno + t.ServiceSpecificExitCode = windows.NO_ERROR + } + t.CheckPoint = status.CheckPoint + t.WaitHint = status.WaitHint + return windows.SetServiceStatus(s.h, &t) +} + +var ( + initCallbacks sync.Once + ctlHandlerCallback uintptr + serviceMainCallback uintptr +) + +func ctlHandler(ctl, evtype, 
evdata, context uintptr) uintptr { + s := (*service)(unsafe.Pointer(context)) + e := ctlEvent{cmd: Cmd(ctl), eventType: uint32(evtype), eventData: evdata, context: 123456} // Set context to 123456 to test issue #25660. + s.c <- e + return 0 +} + +var theService service // This is, unfortunately, a global, which means only one service per process. + +// serviceMain is the entry point called by the service manager, registered earlier by +// the call to StartServiceCtrlDispatcher. +func serviceMain(argc uint32, argv **uint16) uintptr { + handle, err := windows.RegisterServiceCtrlHandlerEx(windows.StringToUTF16Ptr(theService.name), ctlHandlerCallback, uintptr(unsafe.Pointer(&theService))) + if sysErr, ok := err.(windows.Errno); ok { + return uintptr(sysErr) + } else if err != nil { + return uintptr(windows.ERROR_UNKNOWN_EXCEPTION) + } + theService.h = handle + defer func() { + theService.h = 0 + }() + var args16 []*uint16 + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&args16)) + hdr.Data = unsafe.Pointer(argv) + hdr.Len = int(argc) + hdr.Cap = int(argc) + + args := make([]string, len(args16)) + for i, a := range args16 { + args[i] = windows.UTF16PtrToString(a) + } + + cmdsToHandler := make(chan ChangeRequest) + changesFromHandler := make(chan Status) + exitFromHandler := make(chan exitCode) + + go func() { + ss, errno := theService.handler.Execute(args, cmdsToHandler, changesFromHandler) + exitFromHandler <- exitCode{ss, errno} + }() + + ec := exitCode{isSvcSpecific: true, errno: 0} + outcr := ChangeRequest{ + CurrentStatus: Status{State: Stopped}, + } + var outch chan ChangeRequest + inch := theService.c +loop: + for { + select { + case r := <-inch: + if r.errno != 0 { + ec.errno = r.errno + break loop + } + inch = nil + outch = cmdsToHandler + outcr.Cmd = r.cmd + outcr.EventType = r.eventType + outcr.EventData = r.eventData + outcr.Context = r.context + case outch <- outcr: + inch = theService.c + outch = nil + case c := <-changesFromHandler: + err := 
theService.updateStatus(&c, &ec) + if err != nil { + ec.errno = uint32(windows.ERROR_EXCEPTION_IN_SERVICE) + if err2, ok := err.(windows.Errno); ok { + ec.errno = uint32(err2) + } + break loop + } + outcr.CurrentStatus = c + case ec = <-exitFromHandler: + break loop + } + } + + theService.updateStatus(&Status{State: Stopped}, &ec) + + return windows.NO_ERROR +} + +// Run executes service name by calling appropriate handler function. +func Run(name string, handler Handler) error { + initCallbacks.Do(func() { + ctlHandlerCallback = windows.NewCallback(ctlHandler) + serviceMainCallback = windows.NewCallback(serviceMain) + }) + theService.name = name + theService.handler = handler + theService.c = make(chan ctlEvent) + t := []windows.SERVICE_TABLE_ENTRY{ + {ServiceName: windows.StringToUTF16Ptr(theService.name), ServiceProc: serviceMainCallback}, + {ServiceName: nil, ServiceProc: 0}, + } + return windows.StartServiceCtrlDispatcher(&t[0]) +} + +// StatusHandle returns service status handle. It is safe to call this function +// from inside the Handler.Execute because then it is guaranteed to be set. +func StatusHandle() windows.Handle { + return theService.h +} + +// DynamicStartReason returns the reason why the service was started. It is safe +// to call this function from inside the Handler.Execute because then it is +// guaranteed to be set. 
+func DynamicStartReason() (StartReason, error) { + var allocReason *uint32 + err := windows.QueryServiceDynamicInformation(theService.h, windows.SERVICE_DYNAMIC_INFORMATION_LEVEL_START_REASON, unsafe.Pointer(&allocReason)) + if err != nil { + return 0, err + } + reason := StartReason(*allocReason) + windows.LocalFree(windows.Handle(unsafe.Pointer(allocReason))) + return reason, nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 89d29c2a5..8133dfb28 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,4 +1,4 @@ -# github.com/DataWorkflowServices/dws v0.0.1-0.20231031201121-13a5a69a969e +# github.com/DataWorkflowServices/dws v0.0.1-0.20231204205237-79dec3ba94dd ## explicit; go 1.19 github.com/DataWorkflowServices/dws/api/v1alpha2 github.com/DataWorkflowServices/dws/config/crd/bases @@ -275,6 +275,9 @@ github.com/sirupsen/logrus # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag +# github.com/takama/daemon v1.0.0 +## explicit; go 1.14 +github.com/takama/daemon # go.chromium.org/luci v0.0.0-20230227223707-c4460eb434d8 ## explicit; go 1.19 go.chromium.org/luci/common/runtime/goroutine @@ -303,7 +306,9 @@ go.uber.org/zap/internal/pool go.uber.org/zap/zapcore # golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e ## explicit; go 1.18 +golang.org/x/exp/constraints golang.org/x/exp/maps +golang.org/x/exp/slices # golang.org/x/net v0.13.0 ## explicit; go 1.17 golang.org/x/net/context @@ -329,6 +334,9 @@ golang.org/x/sys/internal/unsafeheader golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows +golang.org/x/sys/windows/registry +golang.org/x/sys/windows/svc +golang.org/x/sys/windows/svc/mgr # golang.org/x/term v0.12.0 ## explicit; go 1.17 golang.org/x/term