Merge pull request #32 from tmax-cloud/s3-export
[Vmve] add export destination, s3
hyoung-90 authored Sep 29, 2020
2 parents 6bff4ae + b25da38 commit 4c564d9
Showing 15 changed files with 267 additions and 138 deletions.
13 changes: 13 additions & 0 deletions deploy/crds/hypercloud.tmaxanc.com_v1alpha1_virtualmachinevolumeexport_s3_cr.yaml
@@ -0,0 +1,13 @@
apiVersion: hypercloud.tmaxanc.com/v1alpha1
kind: VirtualMachineVolumeExport
metadata:
  name: s3-export
spec:
  virtualMachineVolume:
    name: myrootdisk
  destination:
    s3:
      # S3 endpoint with bucket and object name in virtual-hosted style, e.g. https://bucketname.s3.region.amazonaws.com/objectkeyname
      url: "http://ceph-bucket.rook-ceph-rgw-my-store.rook-ceph:80/disk.img"
      # the name of the secret that contains the accessKeyID and secretAccessKey for the S3 endpoint
      secretRef: secret-example
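
For reference, the virtual-hosted-style URL in this example decomposes as follows; this is an illustrative reading, not necessarily how the exporter parses it:

``` shell
# http://ceph-bucket.rook-ceph-rgw-my-store.rook-ceph:80/disk.img
# bucket name: ceph-bucket                         (first DNS label of the host)
# endpoint:    rook-ceph-rgw-my-store.rook-ceph:80 (the Rook RGW service)
# object key:  disk.img
```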
@@ -48,6 +48,20 @@ spec:
              description: VirtualMachineVolumeExportDestinationLocal defines
                the Local destination to export volume
              type: object
            s3:
              description: VirtualMachineVolumeExportDestinationS3 defines an S3
                destination to export a volume
              properties:
                secretRef:
                  description: SecretRef is the secret reference which is needed
                    to access the S3 endpoint
                  type: string
                url:
                  description: Url is the S3 endpoint
                  type: string
              required:
              - url
              type: object
          type: object
        virtualMachineVolume:
          description: VirtualMachineVolumeSource indicates the VirtualMachineVolume
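
Once the updated CRD is applied, the new destination schema can be checked from the cluster. A quick sketch, assuming the API server publishes the CRD's openAPIV3Schema (as it does for schemas like the one above):

``` shell
# Inspect the new s3 destination fields straight from the installed CRD
$ kubectl explain virtualmachinevolumeexport.spec.destination.s3
```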
8 changes: 8 additions & 0 deletions deploy/example/endpoint-secret.yaml
@@ -0,0 +1,8 @@
apiVersion: v1
kind: Secret
metadata:
  name: secret-example
type: Opaque
data:
  AWS_ACCESS_KEY_ID: "QllVQjdMQUcwU1hMT0NHS0lCWUc=" # your key or user name (base64 encoded)
  AWS_SECRET_ACCESS_KEY: "ZHFCWFRUUEs2c3U4TlFXY29XUzk2dURpREVGU0V0TnlwRTAzRDMxVQ==" # your secret or password (base64 encoded)
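
The values under `data` must be base64 encoded. A sketch of producing them by hand, or of creating the equivalent Secret directly with kubectl; the plaintext values below are simply the decoded examples from the manifest above, so substitute your own credentials:

``` shell
# Encode a credential by hand (-n avoids a trailing newline in the encoded value)
$ echo -n 'BYUB7LAG0SXLOCGKIBYG' | base64
# Or let kubectl do the encoding:
$ kubectl create secret generic secret-example \
    --from-literal=AWS_ACCESS_KEY_ID='BYUB7LAG0SXLOCGKIBYG' \
    --from-literal=AWS_SECRET_ACCESS_KEY='dqBXTTPK6su8NQWcoWS96uDiDEFSEtNypE03D31U'
```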
7 changes: 7 additions & 0 deletions deploy/rook-ceph/object-bucket-claim.yaml
@@ -0,0 +1,7 @@
apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
  name: ceph-delete-bucket
spec:
  generateBucketName: ceph-bkt
  storageClassName: rook-ceph-bucket
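
When the claim binds, lib-bucket-provisioner exposes the bucket coordinates and credentials in a ConfigMap and a Secret named after the claim. A sketch of reading them back, assuming that convention holds for this provisioner:

``` shell
$ kubectl get configmap ceph-delete-bucket -o jsonpath='{.data.BUCKET_NAME}'
$ kubectl get configmap ceph-delete-bucket -o jsonpath='{.data.BUCKET_HOST}'
$ kubectl get secret ceph-delete-bucket -o jsonpath='{.data.AWS_ACCESS_KEY_ID}' | base64 -d
```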
10 changes: 10 additions & 0 deletions deploy/rook-ceph/object-storageclass.yaml
@@ -0,0 +1,10 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-ceph-bucket
provisioner: rook-ceph.ceph.rook.io/bucket
reclaimPolicy: Delete
parameters:
  objectStoreName: my-store
  objectStoreNamespace: rook-ceph
  region: us-east-1
18 changes: 18 additions & 0 deletions deploy/rook-ceph/object-test.yaml
@@ -0,0 +1,18 @@
apiVersion: ceph.rook.io/v1
kind: CephObjectStore
metadata:
  name: my-store
  namespace: rook-ceph
spec:
  metadataPool:
    replicated:
      size: 1
  dataPool:
    replicated:
      size: 1
  preservePoolsOnDelete: false
  gateway:
    type: s3
    port: 80
    securePort:
    instances: 1
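
Taken together, the three rook-ceph manifests are applied in dependency order, object store first. A sketch, assuming the RGW pods carry Rook's usual app=rook-ceph-rgw label:

``` shell
$ kubectl apply -f deploy/rook-ceph/object-test.yaml
$ kubectl apply -f deploy/rook-ceph/object-storageclass.yaml
$ kubectl apply -f deploy/rook-ceph/object-bucket-claim.yaml
# Wait for the object store gateway to come up
$ kubectl -n rook-ceph get pod -l app=rook-ceph-rgw
```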
17 changes: 17 additions & 0 deletions docs/USERGUIDE.md
@@ -151,3 +151,20 @@ disk-export-exporter-local 1/1 Running 6 67m
# kubectl cp {vmve exporter pod name}:export/disk.img {local path to download}
$ kubectl cp disk-export-exporter-local:export/disk.img localpath.img
```

## Export volume to external object storage

``` shell
# Create a k8s Secret containing the accessKeyId and secretAccessKey for the external endpoint
$ kubectl apply -f deploy/example/endpoint-secret.yaml
# Deploy the export CR
# The s3 url must be written in virtual-hosted style: https://bucket-name.endpoint/object-key-name
# Set secretRef to the Secret created in the previous step
$ kubectl apply -f deploy/crds/hypercloud.tmaxanc.com_v1alpha1_virtualmachinevolumeexport_s3_cr.yaml
# Wait until export is completed
$ kubectl get vmve
NAME        STATE
s3-export   Completed
```
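
Once the export reports Completed, the image should be readable with any S3 client. A sketch using the AWS CLI with the example credentials decoded from endpoint-secret.yaml; this assumes the endpoint is reachable from where the CLI runs, e.g. from inside the cluster:

``` shell
$ AWS_ACCESS_KEY_ID=BYUB7LAG0SXLOCGKIBYG \
  AWS_SECRET_ACCESS_KEY=dqBXTTPK6su8NQWcoWS96uDiDEFSEtNypE03D31U \
  aws s3 ls s3://ceph-bucket/ --endpoint-url http://rook-ceph-rgw-my-store.rook-ceph:80
```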
110 changes: 81 additions & 29 deletions e2e/virtualmachinevolumeexport.go
@@ -2,13 +2,13 @@ package e2e
 
 import (
 	"context"
+	"k8s.io/klog"
 	"kubevirt-image-service/pkg/apis"
 	"kubevirt-image-service/pkg/apis/hypercloud/v1alpha1"
 	"kubevirt-image-service/pkg/util"
 	"testing"
 
-	hc "kubevirt-image-service/pkg/apis/hypercloud/v1alpha1"
-
+	bktv1alpha1 "github.com/kube-object-storage/lib-bucket-provisioner/pkg/apis/objectbucket.io/v1alpha1"
 	framework "github.com/operator-framework/operator-sdk/pkg/test"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
@@ -26,16 +26,27 @@ func virtualMachineVolumeExportTest(t *testing.T, ctx *framework.Context) error
 	if err := framework.AddToFrameworkScheme(apis.AddToScheme, vmve); err != nil {
 		return err
 	}
+	if err := testLocalVmve(t, &cleanupOptions, ns); err != nil {
+		return err
+	}
 
-	if err := virtualMachineVolumeExportStateAvailableTest(t, &cleanupOptions, ns); err != nil {
+	bucket := &bktv1alpha1.ObjectBucketClaimList{}
+	if err := framework.AddToFrameworkScheme(bktv1alpha1.AddToScheme, bucket); err != nil {
+		klog.Errorf("%+v", err)
 		return err
 	}
+	if err := testS3Vmve(t, &cleanupOptions, ns); err != nil {
+		return err
+	}
 	return nil
 }
 
-func virtualMachineVolumeExportStateAvailableTest(t *testing.T, cleanupOptions *framework.CleanupOptions, ns string) error {
-	vmveName := "testvmve"
-	vmve := newVmve(ns, vmveName)
+func testLocalVmve(t *testing.T, cleanupOptions *framework.CleanupOptions, ns string) error {
+	vmveName := "localvmve"
+	destination := v1alpha1.VirtualMachineVolumeExportDestination{
+		Local: &v1alpha1.VirtualMachineVolumeExportDestinationLocal{},
+	}
+	vmve := newVmve(ns, vmveName, destination)
 	if err := framework.Global.Client.Create(context.Background(), vmve, cleanupOptions); err != nil {
 		return err
 	}
Expand All @@ -54,28 +65,35 @@ func virtualMachineVolumeExportStateAvailableTest(t *testing.T, cleanupOptions *
return nil
}

func waitForVmveState(t *testing.T, namespace, name string) error {
return wait.Poll(retryInterval, timeout, func() (done bool, err error) {
t.Logf("Waiting for creating vmve: %s in Namespace: %s \n", name, namespace)
vmve := &hc.VirtualMachineVolumeExport{}
if err = framework.Global.Client.Get(context.Background(), types.NamespacedName{Namespace: namespace, Name: name}, vmve); err != nil {
if errors.IsNotFound(err) {
return false, nil
}
return false, err
}
found, cond := util.GetConditionByType(vmve.Status.Conditions, v1alpha1.VirtualMachineVolumeExportConditionReadyToUse)
if found {
// TODO: check error condition
return cond.Status == corev1.ConditionTrue, nil
}
return false, nil
})
func testS3Vmve(t *testing.T, cleanupOptions *framework.CleanupOptions, ns string) error {
bucketName := "ceph-bucket"
bucket := newBucket(ns, bucketName)
if err := framework.Global.Client.Create(context.Background(), bucket, cleanupOptions); err != nil {
return err
}

destination := v1alpha1.VirtualMachineVolumeExportDestination{
S3: &v1alpha1.VirtualMachineVolumeExportDestinationS3{
URL: "http://"+bucketName+".rook-ceph-rgw-my-store.rook-ceph:80/disk.img",
SecretRef: bucketName,
},
}
vmveName := "s3vmve"
vmve := newVmve(ns, vmveName, destination)
if err := framework.Global.Client.Create(context.Background(), vmve, cleanupOptions); err != nil {
return err
}

if err := waitForVmveState(t, ns, vmveName); err != nil {
return err
}
t.Logf("Vmve %s available\n", vmveName)

return nil
}

func newVmve(ns, vmveName string) *hc.VirtualMachineVolumeExport {
local := v1alpha1.VirtualMachineVolumeExportDestinationLocal{}
return &hc.VirtualMachineVolumeExport{
func newVmve(ns, vmveName string, destination v1alpha1.VirtualMachineVolumeExportDestination) *v1alpha1.VirtualMachineVolumeExport {
return &v1alpha1.VirtualMachineVolumeExport{
TypeMeta: v1.TypeMeta{
Kind: "VirtualMachineVolumeExport",
APIVersion: "hypercloud.tmaxanc.com/v1alpha1",
@@ -88,13 +106,47 @@ func newVmve(ns, vmveName string) *hc.VirtualMachineVolumeExport {
 			VirtualMachineVolume: v1alpha1.VirtualMachineVolumeSource{
 				Name: "availvmv",
 			},
-			Destination: v1alpha1.VirtualMachineVolumeExportDestination{
-				Local: &local,
-			},
+			Destination: destination,
 		},
 	}
 }
 
+func newBucket(ns, bucketName string) *bktv1alpha1.ObjectBucketClaim {
+	return &bktv1alpha1.ObjectBucketClaim{
+		TypeMeta: v1.TypeMeta{
+			Kind:       "ObjectBucketClaim",
+			APIVersion: "objectbucket.io/v1alpha1",
+		},
+		ObjectMeta: v1.ObjectMeta{
+			Name:      bucketName,
+			Namespace: ns,
+		},
+		Spec: bktv1alpha1.ObjectBucketClaimSpec{
+			BucketName:       bucketName,
+			StorageClassName: "rook-ceph-bucket",
+		},
+	}
+}
+
+func waitForVmveState(t *testing.T, namespace, name string) error {
+	return wait.Poll(retryInterval, timeout, func() (done bool, err error) {
+		t.Logf("Waiting for creating vmve: %s in Namespace: %s \n", name, namespace)
+		vmve := &v1alpha1.VirtualMachineVolumeExport{}
+		if err = framework.Global.Client.Get(context.Background(), types.NamespacedName{Namespace: namespace, Name: name}, vmve); err != nil {
+			if errors.IsNotFound(err) {
+				return false, nil
+			}
+			return false, err
+		}
+		found, cond := util.GetConditionByType(vmve.Status.Conditions, v1alpha1.VirtualMachineVolumeExportConditionReadyToUse)
+		if found {
+			// TODO: check error condition
+			return cond.Status == corev1.ConditionTrue, nil
+		}
+		return false, nil
+	})
+}
+
 func waitForPod(t *testing.T, namespace, name string) error {
 	return wait.Poll(retryInterval, timeout, func() (done bool, err error) {
 		t.Logf("Waiting for creating pod: %s in Namespace: %s \n", name, namespace)
1 change: 1 addition & 0 deletions go.mod
@@ -6,6 +6,7 @@ require (
 	github.com/golang/protobuf v1.4.0 // indirect
 	github.com/imdario/mergo v0.3.8 // indirect
 	github.com/kr/text v0.2.0 // indirect
+	github.com/kube-object-storage/lib-bucket-provisioner v0.0.0-20200610144127-e2eec875d6d1
 	github.com/kubernetes-csi/external-snapshotter v1.2.2
 	github.com/kubernetes-csi/external-snapshotter/v2 v2.1.1
 	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect