diff --git a/go.mod b/go.mod
index bbc6e0e3c5..2be7b596d6 100644
--- a/go.mod
+++ b/go.mod
@@ -15,13 +15,13 @@ require (
github.com/Azure/go-autorest/autorest/to v0.4.0
github.com/container-storage-interface/spec v1.9.0
github.com/golang/protobuf v1.5.4
+ github.com/google/uuid v1.6.0
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1
github.com/kubernetes-csi/csi-lib-utils v0.14.1
github.com/kubernetes-csi/csi-proxy/client v1.0.1
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0
github.com/onsi/ginkgo/v2 v2.20.2
github.com/onsi/gomega v1.34.2
- github.com/pborman/uuid v1.2.0
github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021
github.com/stretchr/testify v1.9.0
go.uber.org/mock v0.4.0
@@ -87,7 +87,6 @@ require (
github.com/go-openapi/jsonreference v0.21.0 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
- github.com/gofrs/uuid v4.4.0+incompatible // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
@@ -97,7 +96,6 @@ require (
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 // indirect
- github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
diff --git a/go.sum b/go.sum
index 1ba72444a2..d0d9dde180 100644
--- a/go.sum
+++ b/go.sum
@@ -745,8 +745,6 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
-github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
@@ -839,8 +837,6 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA=
-github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
@@ -946,7 +942,6 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4
github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM=
github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -1131,8 +1126,6 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
-github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=
-github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
diff --git a/pkg/azurefile/azurefile.go b/pkg/azurefile/azurefile.go
index 48ec84ebc8..2ca099dcb1 100644
--- a/pkg/azurefile/azurefile.go
+++ b/pkg/azurefile/azurefile.go
@@ -31,8 +31,8 @@ import (
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
"github.com/Azure/azure-storage-file-go/azfile"
"github.com/container-storage-interface/spec/lib/go/csi"
+ "github.com/google/uuid"
grpcprom "github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus"
- "github.com/pborman/uuid"
"github.com/rubiojr/go-vhd/vhd"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
@@ -632,7 +632,7 @@ func getValidFileShareName(volumeName string) string {
fileShareName = fileShareName[0:fileShareNameMaxLength]
}
if !checkShareNameBeginAndEnd(fileShareName) || len(fileShareName) < fileShareNameMinLength {
- fileShareName = util.GenerateVolumeName("pvc-file", uuid.NewUUID().String(), fileShareNameMaxLength)
+ fileShareName = util.GenerateVolumeName("pvc-file", uuid.NewString(), fileShareNameMaxLength)
klog.Warningf("the requested volume name (%q) is invalid, so it is regenerated as (%q)", volumeName, fileShareName)
}
fileShareName = strings.Replace(fileShareName, "--", "-", -1)
@@ -916,22 +916,27 @@ func (d *Driver) CreateFileShare(ctx context.Context, accountOptions *azure.Acco
return wait.ExponentialBackoff(d.cloud.RequestBackoff(), func() (bool, error) {
var err error
if len(secrets) > 0 {
- accountName, accountKey, rerr := getStorageAccount(secrets)
- if rerr != nil {
- return true, rerr
+ var accountName, accountKey string
+ accountName, accountKey, err = getStorageAccount(secrets)
+ if err != nil {
+ return true, err
}
- fileClient, rerr := newAzureFileClient(accountName, accountKey, d.getStorageEndPointSuffix(), &retry.Backoff{Steps: 1})
- if rerr != nil {
- return true, rerr
+ var fileClient azureFileClient
+ fileClient, err = newAzureFileClient(accountName, accountKey, d.getStorageEndPointSuffix(), &retry.Backoff{Steps: 1})
+ if err != nil {
+ return true, err
}
err = fileClient.CreateFileShare(ctx, shareOptions)
} else {
_, err = d.cloud.FileClient.WithSubscriptionID(accountOptions.SubscriptionID).CreateFileShare(ctx, accountOptions.ResourceGroup, accountOptions.Name, shareOptions, "")
}
- if isRetriableError(err) {
- klog.Warningf("CreateFileShare(%s) on account(%s) failed with error(%v), waiting for retrying", shareOptions.Name, accountOptions.Name, err)
- sleepIfThrottled(err, fileOpThrottlingSleepSec)
- return false, nil
+ if err != nil {
+ if isRetriableError(err) {
+ klog.Warningf("CreateFileShare(%s) on account(%s) failed with error(%v), waiting for retrying", shareOptions.Name, accountOptions.Name, err)
+ sleepIfThrottled(err, fileOpThrottlingSleepSec)
+ return false, nil
+ }
+ klog.Errorf("CreateFileShare(%s) on account(%s) failed with error(%v)", shareOptions.Name, accountOptions.Name, err)
}
return true, err
})
diff --git a/pkg/azurefile/azurefile_dataplane_client.go b/pkg/azurefile/azurefile_dataplane_client.go
index 2dbc12070c..309f9da331 100644
--- a/pkg/azurefile/azurefile_dataplane_client.go
+++ b/pkg/azurefile/azurefile_dataplane_client.go
@@ -18,21 +18,21 @@ package azurefile
import (
"context"
+ "errors"
"fmt"
"net/http"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
- azs "github.com/Azure/azure-sdk-for-go/storage"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share"
"k8s.io/klog/v2"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient"
"sigs.k8s.io/cloud-provider-azure/pkg/retry"
)
-const (
- useHTTPS = true
-)
-
var (
// refer https://github.com/Azure/azure-sdk-for-go/blob/master/storage/client.go#L88.
defaultValidStatusCodes = []int{
@@ -47,78 +47,88 @@ var (
type azureFileDataplaneClient struct {
accountName string
accountKey string
- *azs.FileServiceClient
+ *service.Client
}
func newAzureFileClient(accountName, accountKey, storageEndpointSuffix string, backoff *retry.Backoff) (azureFileClient, error) {
if storageEndpointSuffix == "" {
storageEndpointSuffix = defaultStorageEndPointSuffix
}
-
- fileClient, err := azs.NewClient(accountName, accountKey, storageEndpointSuffix, azs.DefaultAPIVersion, useHTTPS)
+ keyCred, err := service.NewSharedKeyCredential(accountName, accountKey)
if err != nil {
return nil, fmt.Errorf("error creating azure client: %v", err)
}
-
+	storageEndpoint := fmt.Sprintf("https://%s.file.%s", accountName, storageEndpointSuffix)
+ clientOps := utils.GetDefaultAzCoreClientOption()
if backoff != nil {
- fileClient.Sender = &azs.DefaultSender{
- RetryAttempts: backoff.Steps,
- ValidStatusCodes: defaultValidStatusCodes,
- RetryDuration: backoff.Duration,
- }
+ clientOps.Retry.MaxRetries = int32(backoff.Steps)
+ clientOps.Retry.StatusCodes = defaultValidStatusCodes
+ clientOps.Retry.RetryDelay = backoff.Duration
+ }
+ fileClient, err := service.NewClientWithSharedKeyCredential(storageEndpoint, keyCred, &service.ClientOptions{
+ ClientOptions: clientOps,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("error creating azure client: %v", err)
}
return &azureFileDataplaneClient{
- accountName: accountName,
- accountKey: accountKey,
- FileServiceClient: to.Ptr(fileClient.GetFileService()),
+ accountName: accountName,
+ accountKey: accountKey,
+ Client: fileClient,
}, nil
}
-func (f *azureFileDataplaneClient) CreateFileShare(_ context.Context, shareOptions *fileclient.ShareOptions) error {
+func (f *azureFileDataplaneClient) CreateFileShare(ctx context.Context, shareOptions *fileclient.ShareOptions) error {
if shareOptions == nil {
return fmt.Errorf("shareOptions of account(%s) is nil", f.accountName)
}
- share := f.FileServiceClient.GetShareReference(shareOptions.Name)
- share.Properties.Quota = shareOptions.RequestGiB
- newlyCreated, err := share.CreateIfNotExists(nil)
+ shareClient := f.Client.NewShareClient(shareOptions.Name)
+ _, err := shareClient.Create(ctx, &share.CreateOptions{
+ Quota: to.Ptr(int32(shareOptions.RequestGiB)),
+ })
+
if err != nil {
return fmt.Errorf("failed to create file share, err: %v", err)
}
- if !newlyCreated {
- klog.V(2).Infof("file share(%s) under account(%s) already exists", shareOptions.Name, f.accountName)
- }
return nil
}
// delete a file share
-func (f *azureFileDataplaneClient) DeleteFileShare(_ context.Context, shareName string) error {
- return f.FileServiceClient.GetShareReference(shareName).Delete(nil)
+func (f *azureFileDataplaneClient) DeleteFileShare(ctx context.Context, shareName string) error {
+ _, err := f.Client.NewShareClient(shareName).Delete(ctx, nil)
+ return err
}
-func (f *azureFileDataplaneClient) ResizeFileShare(_ context.Context, shareName string, sizeGiB int) error {
- share := f.FileServiceClient.GetShareReference(shareName)
- if share.Properties.Quota >= sizeGiB {
+func (f *azureFileDataplaneClient) ResizeFileShare(ctx context.Context, shareName string, sizeGiB int) error {
+ shareClient := f.Client.NewShareClient(shareName)
+ shareProps, err := shareClient.GetProperties(ctx, nil)
+ if err != nil {
+ return fmt.Errorf("failed to set quota on file share %s, err: %v", shareName, err)
+ }
+	if shareProps.Quota != nil && *shareProps.Quota >= int32(sizeGiB) {
klog.Warningf("file share size(%dGi) is already greater or equal than requested size(%dGi), accountName: %s, shareName: %s",
- share.Properties.Quota, sizeGiB, f.accountName, shareName)
+ *shareProps.Quota, sizeGiB, f.accountName, shareName)
return nil
}
- share.Properties.Quota = sizeGiB
- if err := share.SetProperties(nil); err != nil {
+ if _, err := shareClient.SetProperties(ctx, &share.SetPropertiesOptions{
+ Quota: to.Ptr(int32(sizeGiB)),
+ }); err != nil {
return fmt.Errorf("failed to set quota on file share %s, err: %v", shareName, err)
}
klog.V(4).Infof("resize file share completed, accountName: %s, shareName: %s, sizeGiB: %d", f.accountName, shareName, sizeGiB)
return nil
}
-func (f *azureFileDataplaneClient) GetFileShareQuota(_ context.Context, name string) (int, error) {
- share := f.FileServiceClient.GetShareReference(name)
- exists, err := share.Exists()
+func (f *azureFileDataplaneClient) GetFileShareQuota(ctx context.Context, name string) (int, error) {
+ shareClient := f.Client.NewShareClient(name)
+ shareProps, err := shareClient.GetProperties(ctx, nil)
if err != nil {
+ var respErr *azcore.ResponseError
+ if errors.As(err, &respErr) && respErr != nil && respErr.StatusCode == http.StatusNotFound {
+ return -1, nil
+ }
return -1, err
}
- if !exists {
- return -1, nil
- }
- return share.Properties.Quota, nil
+ return int(*shareProps.Quota), nil
}
diff --git a/pkg/azurefile/azurefile_dataplane_client_test.go b/pkg/azurefile/azurefile_dataplane_client_test.go
index ddca292b41..9ea2d9aa32 100644
--- a/pkg/azurefile/azurefile_dataplane_client_test.go
+++ b/pkg/azurefile/azurefile_dataplane_client_test.go
@@ -59,7 +59,7 @@ func TestCreateFileShare(t *testing.T) {
func TestNewAzureFileClient(t *testing.T) {
_, actualErr := newAzureFileClient("ut", "ut", "ut", nil)
if actualErr != nil {
- expectedErr := fmt.Errorf("error creating azure client: azure: account name is not valid: it must be between 3 and 24 characters, and only may contain numbers and lowercase letters: ut")
+ expectedErr := fmt.Errorf("error creating azure client: decode account key: illegal base64 data at input byte 0")
if !reflect.DeepEqual(actualErr, expectedErr) {
t.Errorf("actualErr: (%v), expectedErr: (%v)", actualErr, expectedErr)
}
diff --git a/pkg/azurefile/azurefile_test.go b/pkg/azurefile/azurefile_test.go
index 0b4a8df1cc..e8352897e5 100644
--- a/pkg/azurefile/azurefile_test.go
+++ b/pkg/azurefile/azurefile_test.go
@@ -1001,7 +1001,7 @@ func TestGetFileShareQuota(t *testing.T) {
mockedFileShareResp: storage.FileShare{},
mockedFileShareErr: nil,
expectedQuota: -1,
- expectedError: fmt.Errorf("error creating azure client: azure: account name is not valid: it must be between 3 and 24 characters, and only may contain numbers and lowercase letters: ut"),
+ expectedError: fmt.Errorf("error creating azure client: decode account key: illegal base64 data at input byte 4"),
},
}
diff --git a/pkg/azurefile/controllerserver.go b/pkg/azurefile/controllerserver.go
index 01e77eb47f..5ff9f336ed 100644
--- a/pkg/azurefile/controllerserver.go
+++ b/pkg/azurefile/controllerserver.go
@@ -33,7 +33,7 @@ import (
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
"github.com/Azure/azure-storage-file-go/azfile"
"github.com/container-storage-interface/spec/lib/go/csi"
- "github.com/pborman/uuid"
+ "github.com/google/uuid"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -623,7 +623,7 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest)
diskName = validFileShareName + vhdSuffix
} else {
// use uuid as vhd disk name if file share specified
- diskName = uuid.NewUUID().String() + vhdSuffix
+ diskName = uuid.NewString() + vhdSuffix
}
diskSizeBytes := volumehelper.GiBToBytes(requestGiB)
klog.V(2).Infof("begin to create vhd file(%s) size(%d) on share(%s) on account(%s) type(%s) rg(%s) location(%s)",
diff --git a/test/e2e/suite_test.go b/test/e2e/suite_test.go
index ee08eae867..254b873c57 100644
--- a/test/e2e/suite_test.go
+++ b/test/e2e/suite_test.go
@@ -28,10 +28,10 @@ import (
"strings"
"testing"
+ "github.com/google/uuid"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/ginkgo/v2/reporters"
"github.com/onsi/gomega"
- "github.com/pborman/uuid"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/config"
"sigs.k8s.io/azurefile-csi-driver/pkg/azurefile"
@@ -143,7 +143,7 @@ var _ = ginkgo.BeforeSuite(func(ctx ginkgo.SpecContext) {
driverOptions := azurefile.DriverOptions{
NodeID: os.Getenv("nodeid"),
DriverName: azurefile.DefaultDriverName,
- Endpoint: fmt.Sprintf("unix:///tmp/csi-%s.sock", uuid.NewUUID().String()),
+ Endpoint: fmt.Sprintf("unix:///tmp/csi-%s.sock", uuid.NewString()),
KubeConfig: kubeconfig,
}
azurefileDriver = azurefile.NewDriver(&driverOptions)
diff --git a/test/utils/credentials/credentials.go b/test/utils/credentials/credentials.go
index 24d56b509b..f2aabd509c 100644
--- a/test/utils/credentials/credentials.go
+++ b/test/utils/credentials/credentials.go
@@ -21,7 +21,7 @@ import (
"html/template"
"os"
- "github.com/pborman/uuid"
+ "github.com/google/uuid"
)
const (
@@ -124,7 +124,7 @@ func CreateAzureCredentialFile(isAzureChinaCloud bool) (*Credentials, error) {
}
if resourceGroup == "" {
- resourceGroup = ResourceGroupPrefix + uuid.NewUUID().String()
+ resourceGroup = ResourceGroupPrefix + uuid.NewString()
}
if location == "" {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md b/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md
deleted file mode 100644
index 7e83a5c086..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Azure Storage SDK for Go (Preview)
-
-:exclamation: IMPORTANT: This package is in maintenance only and will be deprecated in the
-future. Please use one of the following packages instead.
-
-| Service | Import Path/Repo |
-|---------|------------------|
-| Storage - Blobs | [github.com/Azure/azure-sdk-for-go/sdk/storage/azblob](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob) |
-| Storage - Files | [github.com/Azure/azure-storage-file-go](https://github.com/Azure/azure-storage-file-go) |
-| Storage - Queues | [github.com/Azure/azure-storage-queue-go](https://github.com/Azure/azure-storage-queue-go) |
-| Storage - Tables | [github.com/Azure/azure-sdk-for-go/sdk/data/aztables](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/data/aztables)
-
-The `github.com/Azure/azure-sdk-for-go/storage` package is used to manage
-[Azure Storage](https://docs.microsoft.com/azure/storage/) data plane
-resources: containers, blobs, tables, and queues.
-
-To manage storage *accounts* use Azure Resource Manager (ARM) via the packages
-at [github.com/Azure/azure-sdk-for-go/services/storage](https://github.com/Azure/azure-sdk-for-go/tree/main/services/storage).
-
-This package also supports the [Azure Storage
-Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/)
-(Windows only).
-
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go
deleted file mode 100644
index 306dd1b711..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-import (
- "bytes"
- "crypto/md5"
- "encoding/base64"
- "fmt"
- "net/http"
- "net/url"
- "time"
-)
-
-// PutAppendBlob initializes an empty append blob with specified name. An
-// append blob must be created using this method before appending blocks.
-//
-// See CreateBlockBlobFromReader for more info on creating blobs.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
-func (b *Blob) PutAppendBlob(options *PutBlobOptions) error {
- params := url.Values{}
- headers := b.Container.bsc.client.getStandardHeaders()
- headers["x-ms-blob-type"] = string(BlobTypeAppend)
- headers = mergeHeaders(headers, headersFromStruct(b.Properties))
- headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
-
- resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
- if err != nil {
- return err
- }
- return b.respondCreation(resp, BlobTypeAppend)
-}
-
-// AppendBlockOptions includes the options for an append block operation
-type AppendBlockOptions struct {
- Timeout uint
- LeaseID string `header:"x-ms-lease-id"`
- MaxSize *uint `header:"x-ms-blob-condition-maxsize"`
- AppendPosition *uint `header:"x-ms-blob-condition-appendpos"`
- IfModifiedSince *time.Time `header:"If-Modified-Since"`
- IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
- IfMatch string `header:"If-Match"`
- IfNoneMatch string `header:"If-None-Match"`
- RequestID string `header:"x-ms-client-request-id"`
- ContentMD5 bool
-}
-
-// AppendBlock appends a block to an append blob.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Append-Block
-func (b *Blob) AppendBlock(chunk []byte, options *AppendBlockOptions) error {
- params := url.Values{"comp": {"appendblock"}}
- headers := b.Container.bsc.client.getStandardHeaders()
- headers["Content-Length"] = fmt.Sprintf("%v", len(chunk))
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- if options.ContentMD5 {
- md5sum := md5.Sum(chunk)
- headers[headerContentMD5] = base64.StdEncoding.EncodeToString(md5sum[:])
- }
- }
- uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
-
- resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes.NewReader(chunk), b.Container.bsc.auth)
- if err != nil {
- return err
- }
- return b.respondCreation(resp, BlobTypeAppend)
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go
deleted file mode 100644
index 01741524af..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go
+++ /dev/null
@@ -1,235 +0,0 @@
-// Package storage provides clients for Microsoft Azure Storage Services.
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-import (
- "bytes"
- "fmt"
- "net/url"
- "sort"
- "strings"
-)
-
-// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/authentication-for-the-azure-storage-services
-
-type authentication string
-
-const (
- sharedKey authentication = "sharedKey"
- sharedKeyForTable authentication = "sharedKeyTable"
- sharedKeyLite authentication = "sharedKeyLite"
- sharedKeyLiteForTable authentication = "sharedKeyLiteTable"
-
- // headers
- headerAcceptCharset = "Accept-Charset"
- headerAuthorization = "Authorization"
- headerContentLength = "Content-Length"
- headerDate = "Date"
- headerXmsDate = "x-ms-date"
- headerXmsVersion = "x-ms-version"
- headerContentEncoding = "Content-Encoding"
- headerContentLanguage = "Content-Language"
- headerContentType = "Content-Type"
- headerContentMD5 = "Content-MD5"
- headerIfModifiedSince = "If-Modified-Since"
- headerIfMatch = "If-Match"
- headerIfNoneMatch = "If-None-Match"
- headerIfUnmodifiedSince = "If-Unmodified-Since"
- headerRange = "Range"
- headerDataServiceVersion = "DataServiceVersion"
- headerMaxDataServiceVersion = "MaxDataServiceVersion"
- headerContentTransferEncoding = "Content-Transfer-Encoding"
-)
-
-func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) {
- if !c.sasClient {
- authHeader, err := c.getSharedKey(verb, url, headers, auth)
- if err != nil {
- return nil, err
- }
- headers[headerAuthorization] = authHeader
- }
- return headers, nil
-}
-
-func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth authentication) (string, error) {
- canRes, err := c.buildCanonicalizedResource(url, auth, false)
- if err != nil {
- return "", err
- }
-
- canString, err := buildCanonicalizedString(verb, headers, canRes, auth)
- if err != nil {
- return "", err
- }
- return c.createAuthorizationHeader(canString, auth), nil
-}
-
-func (c *Client) buildCanonicalizedResource(uri string, auth authentication, sas bool) (string, error) {
- errMsg := "buildCanonicalizedResource error: %s"
- u, err := url.Parse(uri)
- if err != nil {
- return "", fmt.Errorf(errMsg, err.Error())
- }
-
- cr := bytes.NewBufferString("")
- if c.accountName != StorageEmulatorAccountName || !sas {
- cr.WriteString("/")
- cr.WriteString(c.getCanonicalizedAccountName())
- }
-
- if len(u.Path) > 0 {
- // Any portion of the CanonicalizedResource string that is derived from
- // the resource's URI should be encoded exactly as it is in the URI.
- // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
- cr.WriteString(u.EscapedPath())
- }
-
- params, err := url.ParseQuery(u.RawQuery)
- if err != nil {
- return "", fmt.Errorf(errMsg, err.Error())
- }
-
- // See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277
- if auth == sharedKey {
- if len(params) > 0 {
- cr.WriteString("\n")
-
- keys := []string{}
- for key := range params {
- keys = append(keys, key)
- }
- sort.Strings(keys)
-
- completeParams := []string{}
- for _, key := range keys {
- if len(params[key]) > 1 {
- sort.Strings(params[key])
- }
-
- completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ",")))
- }
- cr.WriteString(strings.Join(completeParams, "\n"))
- }
- } else {
- // search for "comp" parameter, if exists then add it to canonicalizedresource
- if v, ok := params["comp"]; ok {
- cr.WriteString("?comp=" + v[0])
- }
- }
-
- return string(cr.Bytes()), nil
-}
-
-func (c *Client) getCanonicalizedAccountName() string {
- // since we may be trying to access a secondary storage account, we need to
- // remove the -secondary part of the storage name
- return strings.TrimSuffix(c.accountName, "-secondary")
-}
-
-func buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string, auth authentication) (string, error) {
- contentLength := headers[headerContentLength]
- if contentLength == "0" {
- contentLength = ""
- }
- date := headers[headerDate]
- if v, ok := headers[headerXmsDate]; ok {
- if auth == sharedKey || auth == sharedKeyLite {
- date = ""
- } else {
- date = v
- }
- }
- var canString string
- switch auth {
- case sharedKey:
- canString = strings.Join([]string{
- verb,
- headers[headerContentEncoding],
- headers[headerContentLanguage],
- contentLength,
- headers[headerContentMD5],
- headers[headerContentType],
- date,
- headers[headerIfModifiedSince],
- headers[headerIfMatch],
- headers[headerIfNoneMatch],
- headers[headerIfUnmodifiedSince],
- headers[headerRange],
- buildCanonicalizedHeader(headers),
- canonicalizedResource,
- }, "\n")
- case sharedKeyForTable:
- canString = strings.Join([]string{
- verb,
- headers[headerContentMD5],
- headers[headerContentType],
- date,
- canonicalizedResource,
- }, "\n")
- case sharedKeyLite:
- canString = strings.Join([]string{
- verb,
- headers[headerContentMD5],
- headers[headerContentType],
- date,
- buildCanonicalizedHeader(headers),
- canonicalizedResource,
- }, "\n")
- case sharedKeyLiteForTable:
- canString = strings.Join([]string{
- date,
- canonicalizedResource,
- }, "\n")
- default:
- return "", fmt.Errorf("%s authentication is not supported yet", auth)
- }
- return canString, nil
-}
-
-func buildCanonicalizedHeader(headers map[string]string) string {
- cm := make(map[string]string)
-
- for k, v := range headers {
- headerName := strings.TrimSpace(strings.ToLower(k))
- if strings.HasPrefix(headerName, "x-ms-") {
- cm[headerName] = v
- }
- }
-
- if len(cm) == 0 {
- return ""
- }
-
- keys := []string{}
- for key := range cm {
- keys = append(keys, key)
- }
-
- sort.Strings(keys)
-
- ch := bytes.NewBufferString("")
-
- for _, key := range keys {
- ch.WriteString(key)
- ch.WriteRune(':')
- ch.WriteString(cm[key])
- ch.WriteRune('\n')
- }
-
- return strings.TrimSuffix(string(ch.Bytes()), "\n")
-}
-
-func (c *Client) createAuthorizationHeader(canonicalizedString string, auth authentication) string {
- signature := c.computeHmac256(canonicalizedString)
- var key string
- switch auth {
- case sharedKey, sharedKeyForTable:
- key = "SharedKey"
- case sharedKeyLite, sharedKeyLiteForTable:
- key = "SharedKeyLite"
- }
- return fmt.Sprintf("%s %s:%s", key, c.getCanonicalizedAccountName(), signature)
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go
deleted file mode 100644
index 462e3dcf2f..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go
+++ /dev/null
@@ -1,621 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-import (
- "encoding/xml"
- "errors"
- "fmt"
- "io"
- "net/http"
- "net/url"
- "strconv"
- "strings"
- "time"
-)
-
-// A Blob is an entry in BlobListResponse.
-type Blob struct {
- Container *Container
- Name string `xml:"Name"`
- Snapshot time.Time `xml:"Snapshot"`
- Properties BlobProperties `xml:"Properties"`
- Metadata BlobMetadata `xml:"Metadata"`
-}
-
-// PutBlobOptions includes the options any put blob operation
-// (page, block, append)
-type PutBlobOptions struct {
- Timeout uint
- LeaseID string `header:"x-ms-lease-id"`
- Origin string `header:"Origin"`
- IfModifiedSince *time.Time `header:"If-Modified-Since"`
- IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
- IfMatch string `header:"If-Match"`
- IfNoneMatch string `header:"If-None-Match"`
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// BlobMetadata is a set of custom name/value pairs.
-//
-// See https://msdn.microsoft.com/en-us/library/azure/dd179404.aspx
-type BlobMetadata map[string]string
-
-type blobMetadataEntries struct {
- Entries []blobMetadataEntry `xml:",any"`
-}
-type blobMetadataEntry struct {
- XMLName xml.Name
- Value string `xml:",chardata"`
-}
-
-// UnmarshalXML converts the xml:Metadata into Metadata map
-func (bm *BlobMetadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
- var entries blobMetadataEntries
- if err := d.DecodeElement(&entries, &start); err != nil {
- return err
- }
- for _, entry := range entries.Entries {
- if *bm == nil {
- *bm = make(BlobMetadata)
- }
- (*bm)[strings.ToLower(entry.XMLName.Local)] = entry.Value
- }
- return nil
-}
-
-// MarshalXML implements the xml.Marshaler interface. It encodes
-// metadata name/value pairs as they would appear in an Azure
-// ListBlobs response.
-func (bm BlobMetadata) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
- entries := make([]blobMetadataEntry, 0, len(bm))
- for k, v := range bm {
- entries = append(entries, blobMetadataEntry{
- XMLName: xml.Name{Local: http.CanonicalHeaderKey(k)},
- Value: v,
- })
- }
- return enc.EncodeElement(blobMetadataEntries{
- Entries: entries,
- }, start)
-}
-
-// BlobProperties contains various properties of a blob
-// returned in various endpoints like ListBlobs or GetBlobProperties.
-type BlobProperties struct {
- LastModified TimeRFC1123 `xml:"Last-Modified"`
- Etag string `xml:"Etag"`
- ContentMD5 string `xml:"Content-MD5" header:"x-ms-blob-content-md5"`
- ContentLength int64 `xml:"Content-Length"`
- ContentType string `xml:"Content-Type" header:"x-ms-blob-content-type"`
- ContentEncoding string `xml:"Content-Encoding" header:"x-ms-blob-content-encoding"`
- CacheControl string `xml:"Cache-Control" header:"x-ms-blob-cache-control"`
- ContentLanguage string `xml:"Cache-Language" header:"x-ms-blob-content-language"`
- ContentDisposition string `xml:"Content-Disposition" header:"x-ms-blob-content-disposition"`
- BlobType BlobType `xml:"BlobType"`
- SequenceNumber int64 `xml:"x-ms-blob-sequence-number"`
- CopyID string `xml:"CopyId"`
- CopyStatus string `xml:"CopyStatus"`
- CopySource string `xml:"CopySource"`
- CopyProgress string `xml:"CopyProgress"`
- CopyCompletionTime TimeRFC1123 `xml:"CopyCompletionTime"`
- CopyStatusDescription string `xml:"CopyStatusDescription"`
- LeaseStatus string `xml:"LeaseStatus"`
- LeaseState string `xml:"LeaseState"`
- LeaseDuration string `xml:"LeaseDuration"`
- ServerEncrypted bool `xml:"ServerEncrypted"`
- IncrementalCopy bool `xml:"IncrementalCopy"`
-}
-
-// BlobType defines the type of the Azure Blob.
-type BlobType string
-
-// Types of page blobs
-const (
- BlobTypeBlock BlobType = "BlockBlob"
- BlobTypePage BlobType = "PageBlob"
- BlobTypeAppend BlobType = "AppendBlob"
-)
-
-func (b *Blob) buildPath() string {
- return b.Container.buildPath() + "/" + b.Name
-}
-
-// Exists returns true if a blob with given name exists on the specified
-// container of the storage account.
-func (b *Blob) Exists() (bool, error) {
- uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), nil)
- headers := b.Container.bsc.client.getStandardHeaders()
- resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth)
- if resp != nil {
- defer drainRespBody(resp)
- if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound {
- return resp.StatusCode == http.StatusOK, nil
- }
- }
- return false, err
-}
-
-// GetURL gets the canonical URL to the blob with the specified name in the
-// specified container.
-// This method does not create a publicly accessible URL if the blob or container
-// is private and this method does not check if the blob exists.
-func (b *Blob) GetURL() string {
- container := b.Container.Name
- if container == "" {
- container = "$root"
- }
- return b.Container.bsc.client.getEndpoint(blobServiceName, pathForResource(container, b.Name), nil)
-}
-
-// GetBlobRangeOptions includes the options for a get blob range operation
-type GetBlobRangeOptions struct {
- Range *BlobRange
- GetRangeContentMD5 bool
- *GetBlobOptions
-}
-
-// GetBlobOptions includes the options for a get blob operation
-type GetBlobOptions struct {
- Timeout uint
- Snapshot *time.Time
- LeaseID string `header:"x-ms-lease-id"`
- Origin string `header:"Origin"`
- IfModifiedSince *time.Time `header:"If-Modified-Since"`
- IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
- IfMatch string `header:"If-Match"`
- IfNoneMatch string `header:"If-None-Match"`
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// BlobRange represents the bytes range to be get
-type BlobRange struct {
- Start uint64
- End uint64
-}
-
-func (br BlobRange) String() string {
- if br.End == 0 {
- return fmt.Sprintf("bytes=%d-", br.Start)
- }
- return fmt.Sprintf("bytes=%d-%d", br.Start, br.End)
-}
-
-// Get returns a stream to read the blob. Caller must call both Read and Close()
-// to correctly close the underlying connection.
-//
-// See the GetRange method for use with a Range header.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob
-func (b *Blob) Get(options *GetBlobOptions) (io.ReadCloser, error) {
- rangeOptions := GetBlobRangeOptions{
- GetBlobOptions: options,
- }
- resp, err := b.getRange(&rangeOptions)
- if err != nil {
- return nil, err
- }
-
- if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
- return nil, err
- }
- if err := b.writeProperties(resp.Header, true); err != nil {
- return resp.Body, err
- }
- return resp.Body, nil
-}
-
-// GetRange reads the specified range of a blob to a stream. The bytesRange
-// string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec.
-// Caller must call both Read and Close()// to correctly close the underlying
-// connection.
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob
-func (b *Blob) GetRange(options *GetBlobRangeOptions) (io.ReadCloser, error) {
- resp, err := b.getRange(options)
- if err != nil {
- return nil, err
- }
-
- if err := checkRespCode(resp, []int{http.StatusPartialContent}); err != nil {
- return nil, err
- }
- // Content-Length header should not be updated, as the service returns the range length
- // (which is not alwys the full blob length)
- if err := b.writeProperties(resp.Header, false); err != nil {
- return resp.Body, err
- }
- return resp.Body, nil
-}
-
-func (b *Blob) getRange(options *GetBlobRangeOptions) (*http.Response, error) {
- params := url.Values{}
- headers := b.Container.bsc.client.getStandardHeaders()
-
- if options != nil {
- if options.Range != nil {
- headers["Range"] = options.Range.String()
- if options.GetRangeContentMD5 {
- headers["x-ms-range-get-content-md5"] = "true"
- }
- }
- if options.GetBlobOptions != nil {
- headers = mergeHeaders(headers, headersFromStruct(*options.GetBlobOptions))
- params = addTimeout(params, options.Timeout)
- params = addSnapshot(params, options.Snapshot)
- }
- }
- uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
-
- resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
- if err != nil {
- return nil, err
- }
- return resp, err
-}
-
-// SnapshotOptions includes the options for a snapshot blob operation
-type SnapshotOptions struct {
- Timeout uint
- LeaseID string `header:"x-ms-lease-id"`
- IfModifiedSince *time.Time `header:"If-Modified-Since"`
- IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
- IfMatch string `header:"If-Match"`
- IfNoneMatch string `header:"If-None-Match"`
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// CreateSnapshot creates a snapshot for a blob
-// See https://msdn.microsoft.com/en-us/library/azure/ee691971.aspx
-func (b *Blob) CreateSnapshot(options *SnapshotOptions) (snapshotTimestamp *time.Time, err error) {
- params := url.Values{"comp": {"snapshot"}}
- headers := b.Container.bsc.client.getStandardHeaders()
- headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
-
- resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
- if err != nil || resp == nil {
- return nil, err
- }
- defer drainRespBody(resp)
-
- if err := checkRespCode(resp, []int{http.StatusCreated}); err != nil {
- return nil, err
- }
-
- snapshotResponse := resp.Header.Get(http.CanonicalHeaderKey("x-ms-snapshot"))
- if snapshotResponse != "" {
- snapshotTimestamp, err := time.Parse(time.RFC3339, snapshotResponse)
- if err != nil {
- return nil, err
- }
- return &snapshotTimestamp, nil
- }
-
- return nil, errors.New("Snapshot not created")
-}
-
-// GetBlobPropertiesOptions includes the options for a get blob properties operation
-type GetBlobPropertiesOptions struct {
- Timeout uint
- Snapshot *time.Time
- LeaseID string `header:"x-ms-lease-id"`
- IfModifiedSince *time.Time `header:"If-Modified-Since"`
- IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
- IfMatch string `header:"If-Match"`
- IfNoneMatch string `header:"If-None-Match"`
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// GetProperties provides various information about the specified blob.
-// See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx
-func (b *Blob) GetProperties(options *GetBlobPropertiesOptions) error {
- params := url.Values{}
- headers := b.Container.bsc.client.getStandardHeaders()
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- params = addSnapshot(params, options.Snapshot)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
-
- resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
-
- if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
- return err
- }
- return b.writeProperties(resp.Header, true)
-}
-
-func (b *Blob) writeProperties(h http.Header, includeContentLen bool) error {
- var err error
-
- contentLength := b.Properties.ContentLength
- if includeContentLen {
- contentLengthStr := h.Get("Content-Length")
- if contentLengthStr != "" {
- contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64)
- if err != nil {
- return err
- }
- }
- }
-
- var sequenceNum int64
- sequenceNumStr := h.Get("x-ms-blob-sequence-number")
- if sequenceNumStr != "" {
- sequenceNum, err = strconv.ParseInt(sequenceNumStr, 0, 64)
- if err != nil {
- return err
- }
- }
-
- lastModified, err := getTimeFromHeaders(h, "Last-Modified")
- if err != nil {
- return err
- }
-
- copyCompletionTime, err := getTimeFromHeaders(h, "x-ms-copy-completion-time")
- if err != nil {
- return err
- }
-
- b.Properties = BlobProperties{
- LastModified: TimeRFC1123(*lastModified),
- Etag: h.Get("Etag"),
- ContentMD5: h.Get("Content-MD5"),
- ContentLength: contentLength,
- ContentEncoding: h.Get("Content-Encoding"),
- ContentType: h.Get("Content-Type"),
- ContentDisposition: h.Get("Content-Disposition"),
- CacheControl: h.Get("Cache-Control"),
- ContentLanguage: h.Get("Content-Language"),
- SequenceNumber: sequenceNum,
- CopyCompletionTime: TimeRFC1123(*copyCompletionTime),
- CopyStatusDescription: h.Get("x-ms-copy-status-description"),
- CopyID: h.Get("x-ms-copy-id"),
- CopyProgress: h.Get("x-ms-copy-progress"),
- CopySource: h.Get("x-ms-copy-source"),
- CopyStatus: h.Get("x-ms-copy-status"),
- BlobType: BlobType(h.Get("x-ms-blob-type")),
- LeaseStatus: h.Get("x-ms-lease-status"),
- LeaseState: h.Get("x-ms-lease-state"),
- }
- b.writeMetadata(h)
- return nil
-}
-
-// SetBlobPropertiesOptions contains various properties of a blob and is an entry
-// in SetProperties
-type SetBlobPropertiesOptions struct {
- Timeout uint
- LeaseID string `header:"x-ms-lease-id"`
- Origin string `header:"Origin"`
- IfModifiedSince *time.Time `header:"If-Modified-Since"`
- IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
- IfMatch string `header:"If-Match"`
- IfNoneMatch string `header:"If-None-Match"`
- SequenceNumberAction *SequenceNumberAction
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// SequenceNumberAction defines how the blob's sequence number should be modified
-type SequenceNumberAction string
-
-// Options for sequence number action
-const (
- SequenceNumberActionMax SequenceNumberAction = "max"
- SequenceNumberActionUpdate SequenceNumberAction = "update"
- SequenceNumberActionIncrement SequenceNumberAction = "increment"
-)
-
-// SetProperties replaces the BlobHeaders for the specified blob.
-//
-// Some keys may be converted to Camel-Case before sending. All keys
-// are returned in lower case by GetBlobProperties. HTTP header names
-// are case-insensitive so case munging should not matter to other
-// applications either.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Blob-Properties
-func (b *Blob) SetProperties(options *SetBlobPropertiesOptions) error {
- params := url.Values{"comp": {"properties"}}
- headers := b.Container.bsc.client.getStandardHeaders()
- headers = mergeHeaders(headers, headersFromStruct(b.Properties))
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
-
- if b.Properties.BlobType == BlobTypePage {
- headers = addToHeaders(headers, "x-ms-blob-content-length", fmt.Sprintf("%v", b.Properties.ContentLength))
- if options != nil && options.SequenceNumberAction != nil {
- headers = addToHeaders(headers, "x-ms-sequence-number-action", string(*options.SequenceNumberAction))
- if *options.SequenceNumberAction != SequenceNumberActionIncrement {
- headers = addToHeaders(headers, "x-ms-blob-sequence-number", fmt.Sprintf("%v", b.Properties.SequenceNumber))
- }
- }
- }
-
- resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
- return checkRespCode(resp, []int{http.StatusOK})
-}
-
-// SetBlobMetadataOptions includes the options for a set blob metadata operation
-type SetBlobMetadataOptions struct {
- Timeout uint
- LeaseID string `header:"x-ms-lease-id"`
- IfModifiedSince *time.Time `header:"If-Modified-Since"`
- IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
- IfMatch string `header:"If-Match"`
- IfNoneMatch string `header:"If-None-Match"`
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// SetMetadata replaces the metadata for the specified blob.
-//
-// Some keys may be converted to Camel-Case before sending. All keys
-// are returned in lower case by GetBlobMetadata. HTTP header names
-// are case-insensitive so case munging should not matter to other
-// applications either.
-//
-// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
-func (b *Blob) SetMetadata(options *SetBlobMetadataOptions) error {
- params := url.Values{"comp": {"metadata"}}
- headers := b.Container.bsc.client.getStandardHeaders()
- headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
-
- resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
- return checkRespCode(resp, []int{http.StatusOK})
-}
-
-// GetBlobMetadataOptions includes the options for a get blob metadata operation
-type GetBlobMetadataOptions struct {
- Timeout uint
- Snapshot *time.Time
- LeaseID string `header:"x-ms-lease-id"`
- IfModifiedSince *time.Time `header:"If-Modified-Since"`
- IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
- IfMatch string `header:"If-Match"`
- IfNoneMatch string `header:"If-None-Match"`
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// GetMetadata returns all user-defined metadata for the specified blob.
-//
-// All metadata keys will be returned in lower case. (HTTP header
-// names are case-insensitive.)
-//
-// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
-func (b *Blob) GetMetadata(options *GetBlobMetadataOptions) error {
- params := url.Values{"comp": {"metadata"}}
- headers := b.Container.bsc.client.getStandardHeaders()
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- params = addSnapshot(params, options.Snapshot)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
-
- resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
-
- if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
- return err
- }
-
- b.writeMetadata(resp.Header)
- return nil
-}
-
-func (b *Blob) writeMetadata(h http.Header) {
- b.Metadata = BlobMetadata(writeMetadata(h))
-}
-
-// DeleteBlobOptions includes the options for a delete blob operation
-type DeleteBlobOptions struct {
- Timeout uint
- Snapshot *time.Time
- LeaseID string `header:"x-ms-lease-id"`
- DeleteSnapshots *bool
- IfModifiedSince *time.Time `header:"If-Modified-Since"`
- IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
- IfMatch string `header:"If-Match"`
- IfNoneMatch string `header:"If-None-Match"`
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// Delete deletes the given blob from the specified container.
-// If the blob does not exist at the time of the Delete Blob operation, it
-// returns error.
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob
-func (b *Blob) Delete(options *DeleteBlobOptions) error {
- resp, err := b.delete(options)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
- return checkRespCode(resp, []int{http.StatusAccepted})
-}
-
-// DeleteIfExists deletes the given blob from the specified container If the
-// blob is deleted with this call, returns true. Otherwise returns false.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob
-func (b *Blob) DeleteIfExists(options *DeleteBlobOptions) (bool, error) {
- resp, err := b.delete(options)
- if resp != nil {
- defer drainRespBody(resp)
- if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound {
- return resp.StatusCode == http.StatusAccepted, nil
- }
- }
- return false, err
-}
-
-func (b *Blob) delete(options *DeleteBlobOptions) (*http.Response, error) {
- params := url.Values{}
- headers := b.Container.bsc.client.getStandardHeaders()
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- params = addSnapshot(params, options.Snapshot)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- if options.DeleteSnapshots != nil {
- if *options.DeleteSnapshots {
- headers["x-ms-delete-snapshots"] = "include"
- } else {
- headers["x-ms-delete-snapshots"] = "only"
- }
- }
- }
- uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
- return b.Container.bsc.client.exec(http.MethodDelete, uri, headers, nil, b.Container.bsc.auth)
-}
-
-// helper method to construct the path to either a blob or container
-func pathForResource(container, name string) string {
- if name != "" {
- return fmt.Sprintf("/%s/%s", container, name)
- }
- return fmt.Sprintf("/%s", container)
-}
-
-func (b *Blob) respondCreation(resp *http.Response, bt BlobType) error {
- defer drainRespBody(resp)
- err := checkRespCode(resp, []int{http.StatusCreated})
- if err != nil {
- return err
- }
- b.Properties.BlobType = bt
- return nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go
deleted file mode 100644
index 89ab054ec2..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-import (
- "errors"
- "fmt"
- "net/url"
- "strings"
- "time"
-)
-
-// OverrideHeaders defines overridable response heaedrs in
-// a request using a SAS URI.
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
-type OverrideHeaders struct {
- CacheControl string
- ContentDisposition string
- ContentEncoding string
- ContentLanguage string
- ContentType string
-}
-
-// BlobSASOptions are options to construct a blob SAS
-// URI.
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
-type BlobSASOptions struct {
- BlobServiceSASPermissions
- OverrideHeaders
- SASOptions
-}
-
-// BlobServiceSASPermissions includes the available permissions for
-// blob service SAS URI.
-type BlobServiceSASPermissions struct {
- Read bool
- Add bool
- Create bool
- Write bool
- Delete bool
-}
-
-func (p BlobServiceSASPermissions) buildString() string {
- permissions := ""
- if p.Read {
- permissions += "r"
- }
- if p.Add {
- permissions += "a"
- }
- if p.Create {
- permissions += "c"
- }
- if p.Write {
- permissions += "w"
- }
- if p.Delete {
- permissions += "d"
- }
- return permissions
-}
-
-// GetSASURI creates an URL to the blob which contains the Shared
-// Access Signature with the specified options.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
-func (b *Blob) GetSASURI(options BlobSASOptions) (string, error) {
- uri := b.GetURL()
- signedResource := "b"
- canonicalizedResource, err := b.Container.bsc.client.buildCanonicalizedResource(uri, b.Container.bsc.auth, true)
- if err != nil {
- return "", err
- }
-
- permissions := options.BlobServiceSASPermissions.buildString()
- return b.Container.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders)
-}
-
-func (c *Client) blobAndFileSASURI(options SASOptions, uri, permissions, canonicalizedResource, signedResource string, headers OverrideHeaders) (string, error) {
- start := ""
- if options.Start != (time.Time{}) {
- start = options.Start.UTC().Format(time.RFC3339)
- }
-
- expiry := options.Expiry.UTC().Format(time.RFC3339)
-
- // We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component).
- canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1)
- canonicalizedResource, err := url.QueryUnescape(canonicalizedResource)
- if err != nil {
- return "", err
- }
-
- protocols := ""
- if options.UseHTTPS {
- protocols = "https"
- }
- stringToSign, err := blobSASStringToSign(permissions, start, expiry, canonicalizedResource, options.Identifier, options.IP, protocols, c.apiVersion, signedResource, "", headers)
- if err != nil {
- return "", err
- }
-
- sig := c.computeHmac256(stringToSign)
- sasParams := url.Values{
- "sv": {c.apiVersion},
- "se": {expiry},
- "sr": {signedResource},
- "sp": {permissions},
- "sig": {sig},
- }
-
- if start != "" {
- sasParams.Add("st", start)
- }
-
- if c.apiVersion >= "2015-04-05" {
- if protocols != "" {
- sasParams.Add("spr", protocols)
- }
- if options.IP != "" {
- sasParams.Add("sip", options.IP)
- }
- }
-
- // Add override response hedaers
- addQueryParameter(sasParams, "rscc", headers.CacheControl)
- addQueryParameter(sasParams, "rscd", headers.ContentDisposition)
- addQueryParameter(sasParams, "rsce", headers.ContentEncoding)
- addQueryParameter(sasParams, "rscl", headers.ContentLanguage)
- addQueryParameter(sasParams, "rsct", headers.ContentType)
-
- sasURL, err := url.Parse(uri)
- if err != nil {
- return "", err
- }
- sasURL.RawQuery = sasParams.Encode()
- return sasURL.String(), nil
-}
-
-func blobSASStringToSign(signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, signedResource, signedSnapshotTime string, headers OverrideHeaders) (string, error) {
- rscc := headers.CacheControl
- rscd := headers.ContentDisposition
- rsce := headers.ContentEncoding
- rscl := headers.ContentLanguage
- rsct := headers.ContentType
-
- if signedVersion >= "2015-02-21" {
- canonicalizedResource = "/blob" + canonicalizedResource
- }
-
- // https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
- if signedVersion >= "2018-11-09" {
- return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, signedResource, signedSnapshotTime, rscc, rscd, rsce, rscl, rsct), nil
- }
-
- // https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12
- if signedVersion >= "2015-04-05" {
- return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, rscc, rscd, rsce, rscl, rsct), nil
- }
-
- // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
- if signedVersion >= "2013-08-15" {
- return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil
- }
-
- return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go
deleted file mode 100644
index 0a985b22b0..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go
+++ /dev/null
@@ -1,175 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-import (
- "encoding/xml"
- "fmt"
- "net/http"
- "net/url"
- "strconv"
- "strings"
-)
-
-// BlobStorageClient contains operations for Microsoft Azure Blob Storage
-// Service.
-type BlobStorageClient struct {
- client Client
- auth authentication
-}
-
-// GetServiceProperties gets the properties of your storage account's blob service.
-// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-blob-service-properties
-func (b *BlobStorageClient) GetServiceProperties() (*ServiceProperties, error) {
- return b.client.getServiceProperties(blobServiceName, b.auth)
-}
-
-// SetServiceProperties sets the properties of your storage account's blob service.
-// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-blob-service-properties
-func (b *BlobStorageClient) SetServiceProperties(props ServiceProperties) error {
- return b.client.setServiceProperties(props, blobServiceName, b.auth)
-}
-
-// ListContainersParameters defines the set of customizable parameters to make a
-// List Containers call.
-//
-// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
-type ListContainersParameters struct {
- Prefix string
- Marker string
- Include string
- MaxResults uint
- Timeout uint
-}
-
-// GetContainerReference returns a Container object for the specified container name.
-func (b *BlobStorageClient) GetContainerReference(name string) *Container {
- return &Container{
- bsc: b,
- Name: name,
- }
-}
-
-// GetContainerReferenceFromSASURI returns a Container object for the specified
-// container SASURI
-func GetContainerReferenceFromSASURI(sasuri url.URL) (*Container, error) {
- path := strings.Split(sasuri.Path, "/")
- if len(path) <= 1 {
- return nil, fmt.Errorf("could not find a container in URI: %s", sasuri.String())
- }
- c, err := newSASClientFromURL(&sasuri)
- if err != nil {
- return nil, err
- }
- cli := c.GetBlobService()
- return &Container{
- bsc: &cli,
- Name: path[1],
- sasuri: sasuri,
- }, nil
-}
-
-// ListContainers returns the list of containers in a storage account along with
-// pagination token and other response details.
-//
-// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
-func (b BlobStorageClient) ListContainers(params ListContainersParameters) (*ContainerListResponse, error) {
- q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
- uri := b.client.getEndpoint(blobServiceName, "", q)
- headers := b.client.getStandardHeaders()
-
- type ContainerAlias struct {
- bsc *BlobStorageClient
- Name string `xml:"Name"`
- Properties ContainerProperties `xml:"Properties"`
- Metadata BlobMetadata
- sasuri url.URL
- }
- type ContainerListResponseAlias struct {
- XMLName xml.Name `xml:"EnumerationResults"`
- Xmlns string `xml:"xmlns,attr"`
- Prefix string `xml:"Prefix"`
- Marker string `xml:"Marker"`
- NextMarker string `xml:"NextMarker"`
- MaxResults int64 `xml:"MaxResults"`
- Containers []ContainerAlias `xml:"Containers>Container"`
- }
-
- var outAlias ContainerListResponseAlias
- resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
- err = xmlUnmarshal(resp.Body, &outAlias)
- if err != nil {
- return nil, err
- }
-
- out := ContainerListResponse{
- XMLName: outAlias.XMLName,
- Xmlns: outAlias.Xmlns,
- Prefix: outAlias.Prefix,
- Marker: outAlias.Marker,
- NextMarker: outAlias.NextMarker,
- MaxResults: outAlias.MaxResults,
- Containers: make([]Container, len(outAlias.Containers)),
- }
- for i, cnt := range outAlias.Containers {
- out.Containers[i] = Container{
- bsc: &b,
- Name: cnt.Name,
- Properties: cnt.Properties,
- Metadata: map[string]string(cnt.Metadata),
- sasuri: cnt.sasuri,
- }
- }
-
- return &out, err
-}
-
-func (p ListContainersParameters) getParameters() url.Values {
- out := url.Values{}
-
- if p.Prefix != "" {
- out.Set("prefix", p.Prefix)
- }
- if p.Marker != "" {
- out.Set("marker", p.Marker)
- }
- if p.Include != "" {
- out.Set("include", p.Include)
- }
- if p.MaxResults != 0 {
- out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
- }
- if p.Timeout != 0 {
- out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10))
- }
-
- return out
-}
-
-func writeMetadata(h http.Header) map[string]string {
- metadata := make(map[string]string)
- for k, v := range h {
- // Can't trust CanonicalHeaderKey() to munge case
- // reliably. "_" is allowed in identifiers:
- // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
- // https://msdn.microsoft.com/library/aa664670(VS.71).aspx
- // http://tools.ietf.org/html/rfc7230#section-3.2
- // ...but "_" is considered invalid by
- // CanonicalMIMEHeaderKey in
- // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
- // so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl".
- k = strings.ToLower(k)
- if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) {
- continue
- }
- // metadata["lol"] = content of the last X-Ms-Meta-Lol header
- k = k[len(userDefinedMetadataHeaderPrefix):]
- metadata[k] = v[len(v)-1]
- }
- return metadata
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go
deleted file mode 100644
index 9d445decfd..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go
+++ /dev/null
@@ -1,300 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-import (
- "bytes"
- "encoding/xml"
- "fmt"
- "io"
- "net/http"
- "net/url"
- "strconv"
- "strings"
- "time"
-)
-
-// BlockListType is used to filter out types of blocks in a Get Blocks List call
-// for a block blob.
-//
-// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all
-// block types.
-type BlockListType string
-
-// Filters for listing blocks in block blobs
-const (
- BlockListTypeAll BlockListType = "all"
- BlockListTypeCommitted BlockListType = "committed"
- BlockListTypeUncommitted BlockListType = "uncommitted"
-)
-
-// Maximum sizes (per REST API) for various concepts
-const (
- MaxBlobBlockSize = 100 * 1024 * 1024
- MaxBlobPageSize = 4 * 1024 * 1024
-)
-
-// BlockStatus defines states a block for a block blob can
-// be in.
-type BlockStatus string
-
-// List of statuses that can be used to refer to a block in a block list
-const (
- BlockStatusUncommitted BlockStatus = "Uncommitted"
- BlockStatusCommitted BlockStatus = "Committed"
- BlockStatusLatest BlockStatus = "Latest"
-)
-
-// Block is used to create Block entities for Put Block List
-// call.
-type Block struct {
- ID string
- Status BlockStatus
-}
-
-// BlockListResponse contains the response fields from Get Block List call.
-//
-// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx
-type BlockListResponse struct {
- XMLName xml.Name `xml:"BlockList"`
- CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"`
- UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"`
-}
-
-// BlockResponse contains the block information returned
-// in the GetBlockListCall.
-type BlockResponse struct {
- Name string `xml:"Name"`
- Size int64 `xml:"Size"`
-}
-
-// CreateBlockBlob initializes an empty block blob with no blocks.
-//
-// See CreateBlockBlobFromReader for more info on creating blobs.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
-func (b *Blob) CreateBlockBlob(options *PutBlobOptions) error {
- return b.CreateBlockBlobFromReader(nil, options)
-}
-
-// CreateBlockBlobFromReader initializes a block blob using data from
-// reader. Size must be the number of bytes read from reader. To
-// create an empty blob, use size==0 and reader==nil.
-//
-// Any headers set in blob.Properties or metadata in blob.Metadata
-// will be set on the blob.
-//
-// The API rejects requests with size > 256 MiB (but this limit is not
-// checked by the SDK). To write a larger blob, use CreateBlockBlob,
-// PutBlock, and PutBlockList.
-//
-// To create a blob from scratch, call container.GetBlobReference() to
-// get an empty blob, fill in blob.Properties and blob.Metadata as
-// appropriate then call this method.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
-func (b *Blob) CreateBlockBlobFromReader(blob io.Reader, options *PutBlobOptions) error {
- params := url.Values{}
- headers := b.Container.bsc.client.getStandardHeaders()
- headers["x-ms-blob-type"] = string(BlobTypeBlock)
-
- headers["Content-Length"] = "0"
- var n int64
- var err error
- if blob != nil {
- type lener interface {
- Len() int
- }
- // TODO(rjeczalik): handle io.ReadSeeker, in case blob is *os.File etc.
- if l, ok := blob.(lener); ok {
- n = int64(l.Len())
- } else {
- var buf bytes.Buffer
- n, err = io.Copy(&buf, blob)
- if err != nil {
- return err
- }
- blob = &buf
- }
-
- headers["Content-Length"] = strconv.FormatInt(n, 10)
- }
- b.Properties.ContentLength = n
-
- headers = mergeHeaders(headers, headersFromStruct(b.Properties))
- headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
-
- resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth)
- if err != nil {
- return err
- }
- return b.respondCreation(resp, BlobTypeBlock)
-}
-
-// PutBlockOptions includes the options for a put block operation
-type PutBlockOptions struct {
- Timeout uint
- LeaseID string `header:"x-ms-lease-id"`
- ContentMD5 string `header:"Content-MD5"`
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// PutBlock saves the given data chunk to the specified block blob with
-// given ID.
-//
-// The API rejects chunks larger than 100 MiB (but this limit is not
-// checked by the SDK).
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block
-func (b *Blob) PutBlock(blockID string, chunk []byte, options *PutBlockOptions) error {
- return b.PutBlockWithLength(blockID, uint64(len(chunk)), bytes.NewReader(chunk), options)
-}
-
-// PutBlockWithLength saves the given data stream of exactly specified size to
-// the block blob with given ID. It is an alternative to PutBlocks where data
-// comes as stream but the length is known in advance.
-//
-// The API rejects requests with size > 100 MiB (but this limit is not
-// checked by the SDK).
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block
-func (b *Blob) PutBlockWithLength(blockID string, size uint64, blob io.Reader, options *PutBlockOptions) error {
- query := url.Values{
- "comp": {"block"},
- "blockid": {blockID},
- }
- headers := b.Container.bsc.client.getStandardHeaders()
- headers["Content-Length"] = fmt.Sprintf("%v", size)
-
- if options != nil {
- query = addTimeout(query, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query)
-
- resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth)
- if err != nil {
- return err
- }
- return b.respondCreation(resp, BlobTypeBlock)
-}
-
-// PutBlockFromURLOptions includes the options for a put block from URL operation
-type PutBlockFromURLOptions struct {
- PutBlockOptions
-
- SourceContentMD5 string `header:"x-ms-source-content-md5"`
- SourceContentCRC64 string `header:"x-ms-source-content-crc64"`
-}
-
-// PutBlockFromURL copy data of exactly specified size from specified URL to
-// the block blob with given ID. It is an alternative to PutBlocks where data
-// comes from a remote URL and the offset and length is known in advance.
-//
-// The API rejects requests with size > 100 MiB (but this limit is not
-// checked by the SDK).
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url
-func (b *Blob) PutBlockFromURL(blockID string, blobURL string, offset int64, size uint64, options *PutBlockFromURLOptions) error {
- query := url.Values{
- "comp": {"block"},
- "blockid": {blockID},
- }
- headers := b.Container.bsc.client.getStandardHeaders()
- // The value of this header must be set to zero.
- // When the length is not zero, the operation will fail with the status code 400 (Bad Request).
- headers["Content-Length"] = "0"
- headers["x-ms-copy-source"] = blobURL
- headers["x-ms-source-range"] = fmt.Sprintf("bytes=%d-%d", offset, uint64(offset)+size-1)
-
- if options != nil {
- query = addTimeout(query, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query)
-
- resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
- if err != nil {
- return err
- }
- return b.respondCreation(resp, BlobTypeBlock)
-}
-
-// PutBlockListOptions includes the options for a put block list operation
-type PutBlockListOptions struct {
- Timeout uint
- LeaseID string `header:"x-ms-lease-id"`
- IfModifiedSince *time.Time `header:"If-Modified-Since"`
- IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
- IfMatch string `header:"If-Match"`
- IfNoneMatch string `header:"If-None-Match"`
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// PutBlockList saves list of blocks to the specified block blob.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block-List
-func (b *Blob) PutBlockList(blocks []Block, options *PutBlockListOptions) error {
- params := url.Values{"comp": {"blocklist"}}
- blockListXML := prepareBlockListRequest(blocks)
- headers := b.Container.bsc.client.getStandardHeaders()
- headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML))
- headers = mergeHeaders(headers, headersFromStruct(b.Properties))
- headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
-
- resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, strings.NewReader(blockListXML), b.Container.bsc.auth)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
- return checkRespCode(resp, []int{http.StatusCreated})
-}
-
-// GetBlockListOptions includes the options for a get block list operation
-type GetBlockListOptions struct {
- Timeout uint
- Snapshot *time.Time
- LeaseID string `header:"x-ms-lease-id"`
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// GetBlockList retrieves list of blocks in the specified block blob.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Block-List
-func (b *Blob) GetBlockList(blockType BlockListType, options *GetBlockListOptions) (BlockListResponse, error) {
- params := url.Values{
- "comp": {"blocklist"},
- "blocklisttype": {string(blockType)},
- }
- headers := b.Container.bsc.client.getStandardHeaders()
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- params = addSnapshot(params, options.Snapshot)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
-
- var out BlockListResponse
- resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
- if err != nil {
- return out, err
- }
- defer resp.Body.Close()
-
- err = xmlUnmarshal(resp.Body, &out)
- return out, err
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go
deleted file mode 100644
index ce6e5a80d8..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go
+++ /dev/null
@@ -1,1061 +0,0 @@
-// Package storage provides clients for Microsoft Azure Storage Services.
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-import (
- "bufio"
- "encoding/base64"
- "encoding/json"
- "encoding/xml"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "mime"
- "mime/multipart"
- "net/http"
- "net/url"
- "regexp"
- "runtime"
- "strconv"
- "strings"
- "time"
-
- "github.com/Azure/azure-sdk-for-go/version"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/azure"
-)
-
-const (
- // DefaultBaseURL is the domain name used for storage requests in the
- // public cloud when a default client is created.
- DefaultBaseURL = "core.windows.net"
-
- // DefaultAPIVersion is the Azure Storage API version string used when a
- // basic client is created.
- DefaultAPIVersion = "2018-03-28"
-
- defaultUseHTTPS = true
- defaultRetryAttempts = 5
- defaultRetryDuration = time.Second * 5
-
- // StorageEmulatorAccountName is the fixed storage account used by Azure Storage Emulator
- StorageEmulatorAccountName = "devstoreaccount1"
-
- // StorageEmulatorAccountKey is the the fixed storage account used by Azure Storage Emulator
- StorageEmulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
-
- blobServiceName = "blob"
- tableServiceName = "table"
- queueServiceName = "queue"
- fileServiceName = "file"
-
- storageEmulatorBlob = "127.0.0.1:10000"
- storageEmulatorTable = "127.0.0.1:10002"
- storageEmulatorQueue = "127.0.0.1:10001"
-
- userAgentHeader = "User-Agent"
-
- userDefinedMetadataHeaderPrefix = "x-ms-meta-"
-
- connectionStringAccountName = "accountname"
- connectionStringAccountKey = "accountkey"
- connectionStringEndpointSuffix = "endpointsuffix"
- connectionStringEndpointProtocol = "defaultendpointsprotocol"
-
- connectionStringBlobEndpoint = "blobendpoint"
- connectionStringFileEndpoint = "fileendpoint"
- connectionStringQueueEndpoint = "queueendpoint"
- connectionStringTableEndpoint = "tableendpoint"
- connectionStringSAS = "sharedaccesssignature"
-)
-
-var (
- validStorageAccount = regexp.MustCompile("^[0-9a-z]{3,24}$")
- validCosmosAccount = regexp.MustCompile("^[0-9a-z-]{3,44}$")
- defaultValidStatusCodes = []int{
- http.StatusRequestTimeout, // 408
- http.StatusInternalServerError, // 500
- http.StatusBadGateway, // 502
- http.StatusServiceUnavailable, // 503
- http.StatusGatewayTimeout, // 504
- }
-)
-
-// Sender sends a request
-type Sender interface {
- Send(*Client, *http.Request) (*http.Response, error)
-}
-
-// DefaultSender is the default sender for the client. It implements
-// an automatic retry strategy.
-type DefaultSender struct {
- RetryAttempts int
- RetryDuration time.Duration
- ValidStatusCodes []int
- attempts int // used for testing
-}
-
-// Send is the default retry strategy in the client
-func (ds *DefaultSender) Send(c *Client, req *http.Request) (resp *http.Response, err error) {
- rr := autorest.NewRetriableRequest(req)
- for attempts := 0; attempts < ds.RetryAttempts; attempts++ {
- err = rr.Prepare()
- if err != nil {
- return resp, err
- }
- resp, err = c.HTTPClient.Do(rr.Request())
- if err == nil && !autorest.ResponseHasStatusCode(resp, ds.ValidStatusCodes...) {
- return resp, err
- }
- drainRespBody(resp)
- autorest.DelayForBackoff(ds.RetryDuration, attempts, req.Cancel)
- ds.attempts = attempts
- }
- ds.attempts++
- return resp, err
-}
-
-// Client is the object that needs to be constructed to perform
-// operations on the storage account.
-type Client struct {
- // HTTPClient is the http.Client used to initiate API
- // requests. http.DefaultClient is used when creating a
- // client.
- HTTPClient *http.Client
-
- // Sender is an interface that sends the request. Clients are
- // created with a DefaultSender. The DefaultSender has an
- // automatic retry strategy built in. The Sender can be customized.
- Sender Sender
-
- accountName string
- accountKey []byte
- useHTTPS bool
- UseSharedKeyLite bool
- baseURL string
- apiVersion string
- userAgent string
- sasClient bool
- accountSASToken url.Values
- additionalHeaders map[string]string
-}
-
-type odataResponse struct {
- resp *http.Response
- odata odataErrorWrapper
-}
-
-// AzureStorageServiceError contains fields of the error response from
-// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx
-// Some fields might be specific to certain calls.
-type AzureStorageServiceError struct {
- Code string `xml:"Code"`
- Message string `xml:"Message"`
- AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"`
- QueryParameterName string `xml:"QueryParameterName"`
- QueryParameterValue string `xml:"QueryParameterValue"`
- Reason string `xml:"Reason"`
- Lang string
- StatusCode int
- RequestID string
- Date string
- APIVersion string
-}
-
-// AzureTablesServiceError contains fields of the error response from
-// Azure Table Storage Service REST API in Atom format.
-// See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx
-type AzureTablesServiceError struct {
- Code string `xml:"code"`
- Message string `xml:"message"`
- StatusCode int
- RequestID string
- Date string
- APIVersion string
-}
-
-func (e AzureTablesServiceError) Error() string {
- return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestInitiated=%s, RequestId=%s, API Version=%s",
- e.StatusCode, e.Code, e.Message, e.Date, e.RequestID, e.APIVersion)
-}
-
-type odataErrorMessage struct {
- Lang string `json:"lang"`
- Value string `json:"value"`
-}
-
-type odataError struct {
- Code string `json:"code"`
- Message odataErrorMessage `json:"message"`
-}
-
-type odataErrorWrapper struct {
- Err odataError `json:"odata.error"`
-}
-
-// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
-// nor with an HTTP status code indicating success.
-type UnexpectedStatusCodeError struct {
- allowed []int
- got int
- inner error
-}
-
-func (e UnexpectedStatusCodeError) Error() string {
- s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }
-
- got := s(e.got)
- expected := []string{}
- for _, v := range e.allowed {
- expected = append(expected, s(v))
- }
- return fmt.Sprintf("storage: status code from service response is %s; was expecting %s. Inner error: %+v", got, strings.Join(expected, " or "), e.inner)
-}
-
-// Got is the actual status code returned by Azure.
-func (e UnexpectedStatusCodeError) Got() int {
- return e.got
-}
-
-// Inner returns any inner error info.
-func (e UnexpectedStatusCodeError) Inner() error {
- return e.inner
-}
-
-// NewClientFromConnectionString creates a Client from the connection string.
-func NewClientFromConnectionString(input string) (Client, error) {
- // build a map of connection string key/value pairs
- parts := map[string]string{}
- for _, pair := range strings.Split(input, ";") {
- if pair == "" {
- continue
- }
-
- equalDex := strings.IndexByte(pair, '=')
- if equalDex <= 0 {
- return Client{}, fmt.Errorf("Invalid connection segment %q", pair)
- }
-
- value := strings.TrimSpace(pair[equalDex+1:])
- key := strings.TrimSpace(strings.ToLower(pair[:equalDex]))
- parts[key] = value
- }
-
- // TODO: validate parameter sets?
-
- if parts[connectionStringAccountName] == StorageEmulatorAccountName {
- return NewEmulatorClient()
- }
-
- if parts[connectionStringSAS] != "" {
- endpoint := ""
- if parts[connectionStringBlobEndpoint] != "" {
- endpoint = parts[connectionStringBlobEndpoint]
- } else if parts[connectionStringFileEndpoint] != "" {
- endpoint = parts[connectionStringFileEndpoint]
- } else if parts[connectionStringQueueEndpoint] != "" {
- endpoint = parts[connectionStringQueueEndpoint]
- } else {
- endpoint = parts[connectionStringTableEndpoint]
- }
-
- return NewAccountSASClientFromEndpointToken(endpoint, parts[connectionStringSAS])
- }
-
- useHTTPS := defaultUseHTTPS
- if parts[connectionStringEndpointProtocol] != "" {
- useHTTPS = parts[connectionStringEndpointProtocol] == "https"
- }
-
- return NewClient(parts[connectionStringAccountName], parts[connectionStringAccountKey],
- parts[connectionStringEndpointSuffix], DefaultAPIVersion, useHTTPS)
-}
-
-// NewBasicClient constructs a Client with given storage service name and
-// key.
-func NewBasicClient(accountName, accountKey string) (Client, error) {
- if accountName == StorageEmulatorAccountName {
- return NewEmulatorClient()
- }
- return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS)
-}
-
-// NewBasicClientOnSovereignCloud constructs a Client with given storage service name and
-// key in the referenced cloud.
-func NewBasicClientOnSovereignCloud(accountName, accountKey string, env azure.Environment) (Client, error) {
- if accountName == StorageEmulatorAccountName {
- return NewEmulatorClient()
- }
- return NewClient(accountName, accountKey, env.StorageEndpointSuffix, DefaultAPIVersion, defaultUseHTTPS)
-}
-
-//NewEmulatorClient contructs a Client intended to only work with Azure
-//Storage Emulator
-func NewEmulatorClient() (Client, error) {
- return NewClient(StorageEmulatorAccountName, StorageEmulatorAccountKey, DefaultBaseURL, DefaultAPIVersion, false)
-}
-
-// NewClient constructs a Client. This should be used if the caller wants
-// to specify whether to use HTTPS, a specific REST API version or a custom
-// storage endpoint than Azure Public Cloud.
-func NewClient(accountName, accountKey, serviceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
- var c Client
- if !IsValidStorageAccount(accountName) {
- return c, fmt.Errorf("azure: account name is not valid: it must be between 3 and 24 characters, and only may contain numbers and lowercase letters: %v", accountName)
- } else if accountKey == "" {
- return c, fmt.Errorf("azure: account key required")
- } else if serviceBaseURL == "" {
- return c, fmt.Errorf("azure: base storage service url required")
- }
-
- key, err := base64.StdEncoding.DecodeString(accountKey)
- if err != nil {
- return c, fmt.Errorf("azure: malformed storage account key: %v", err)
- }
-
- return newClient(accountName, key, serviceBaseURL, apiVersion, useHTTPS)
-}
-
-// NewCosmosClient constructs a Client for Azure CosmosDB. This should be used if the caller wants
-// to specify whether to use HTTPS, a specific REST API version or a custom
-// cosmos endpoint than Azure Public Cloud.
-func NewCosmosClient(accountName, accountKey, serviceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
- var c Client
- if !IsValidCosmosAccount(accountName) {
- return c, fmt.Errorf("azure: account name is not valid: The name can contain only lowercase letters, numbers and the '-' character, and must be between 3 and 44 characters: %v", accountName)
- } else if accountKey == "" {
- return c, fmt.Errorf("azure: account key required")
- } else if serviceBaseURL == "" {
- return c, fmt.Errorf("azure: base storage service url required")
- }
-
- key, err := base64.StdEncoding.DecodeString(accountKey)
- if err != nil {
- return c, fmt.Errorf("azure: malformed cosmos account key: %v", err)
- }
-
- return newClient(accountName, key, serviceBaseURL, apiVersion, useHTTPS)
-}
-
-// newClient constructs a Client with given parameters.
-func newClient(accountName string, accountKey []byte, serviceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
- c := Client{
- HTTPClient: http.DefaultClient,
- accountName: accountName,
- accountKey: accountKey,
- useHTTPS: useHTTPS,
- baseURL: serviceBaseURL,
- apiVersion: apiVersion,
- sasClient: false,
- UseSharedKeyLite: false,
- Sender: &DefaultSender{
- RetryAttempts: defaultRetryAttempts,
- ValidStatusCodes: defaultValidStatusCodes,
- RetryDuration: defaultRetryDuration,
- },
- }
- c.userAgent = c.getDefaultUserAgent()
- return c, nil
-}
-
-// IsValidStorageAccount checks if the storage account name is valid.
-// See https://docs.microsoft.com/en-us/azure/storage/storage-create-storage-account
-func IsValidStorageAccount(account string) bool {
- return validStorageAccount.MatchString(account)
-}
-
-// IsValidCosmosAccount checks if the Cosmos account name is valid.
-// See https://docs.microsoft.com/en-us/azure/cosmos-db/how-to-manage-database-account
-func IsValidCosmosAccount(account string) bool {
- return validCosmosAccount.MatchString(account)
-}
-
-// NewAccountSASClient contructs a client that uses accountSAS authorization
-// for its operations.
-func NewAccountSASClient(account string, token url.Values, env azure.Environment) Client {
- return newSASClient(account, env.StorageEndpointSuffix, token)
-}
-
-// NewAccountSASClientFromEndpointToken constructs a client that uses accountSAS authorization
-// for its operations using the specified endpoint and SAS token.
-func NewAccountSASClientFromEndpointToken(endpoint string, sasToken string) (Client, error) {
- u, err := url.Parse(endpoint)
- if err != nil {
- return Client{}, err
- }
- _, err = url.ParseQuery(sasToken)
- if err != nil {
- return Client{}, err
- }
- u.RawQuery = sasToken
- return newSASClientFromURL(u)
-}
-
-func newSASClient(accountName, baseURL string, sasToken url.Values) Client {
- c := Client{
- HTTPClient: http.DefaultClient,
- apiVersion: DefaultAPIVersion,
- sasClient: true,
- Sender: &DefaultSender{
- RetryAttempts: defaultRetryAttempts,
- ValidStatusCodes: defaultValidStatusCodes,
- RetryDuration: defaultRetryDuration,
- },
- accountName: accountName,
- baseURL: baseURL,
- accountSASToken: sasToken,
- useHTTPS: defaultUseHTTPS,
- }
- c.userAgent = c.getDefaultUserAgent()
- // Get API version and protocol from token
- c.apiVersion = sasToken.Get("sv")
- if spr := sasToken.Get("spr"); spr != "" {
- c.useHTTPS = spr == "https"
- }
- return c
-}
-
-func newSASClientFromURL(u *url.URL) (Client, error) {
- // the host name will look something like this
- // - foo.blob.core.windows.net
- // "foo" is the account name
- // "core.windows.net" is the baseURL
-
- // find the first dot to get account name
- i1 := strings.IndexByte(u.Host, '.')
- if i1 < 0 {
- return Client{}, fmt.Errorf("failed to find '.' in %s", u.Host)
- }
-
- // now find the second dot to get the base URL
- i2 := strings.IndexByte(u.Host[i1+1:], '.')
- if i2 < 0 {
- return Client{}, fmt.Errorf("failed to find '.' in %s", u.Host[i1+1:])
- }
-
- sasToken := u.Query()
- c := newSASClient(u.Host[:i1], u.Host[i1+i2+2:], sasToken)
- if spr := sasToken.Get("spr"); spr == "" {
- // infer from URL if not in the query params set
- c.useHTTPS = u.Scheme == "https"
- }
- return c, nil
-}
-
-func (c Client) isServiceSASClient() bool {
- return c.sasClient && c.accountSASToken == nil
-}
-
-func (c Client) isAccountSASClient() bool {
- return c.sasClient && c.accountSASToken != nil
-}
-
-func (c Client) getDefaultUserAgent() string {
- return fmt.Sprintf("Go/%s (%s-%s) azure-storage-go/%s api-version/%s",
- runtime.Version(),
- runtime.GOARCH,
- runtime.GOOS,
- version.Number,
- c.apiVersion,
- )
-}
-
-// AddToUserAgent adds an extension to the current user agent
-func (c *Client) AddToUserAgent(extension string) error {
- if extension != "" {
- c.userAgent = fmt.Sprintf("%s %s", c.userAgent, extension)
- return nil
- }
- return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.userAgent)
-}
-
-// AddAdditionalHeaders adds additional standard headers
-func (c *Client) AddAdditionalHeaders(headers map[string]string) {
- if headers != nil {
- c.additionalHeaders = map[string]string{}
- for k, v := range headers {
- c.additionalHeaders[k] = v
- }
- }
-}
-
-// protectUserAgent is used in funcs that include extraheaders as a parameter.
-// It prevents the User-Agent header to be overwritten, instead if it happens to
-// be present, it gets added to the current User-Agent. Use it before getStandardHeaders
-func (c *Client) protectUserAgent(extraheaders map[string]string) map[string]string {
- if v, ok := extraheaders[userAgentHeader]; ok {
- c.AddToUserAgent(v)
- delete(extraheaders, userAgentHeader)
- }
- return extraheaders
-}
-
-func (c Client) getBaseURL(service string) *url.URL {
- scheme := "http"
- if c.useHTTPS {
- scheme = "https"
- }
- host := ""
- if c.accountName == StorageEmulatorAccountName {
- switch service {
- case blobServiceName:
- host = storageEmulatorBlob
- case tableServiceName:
- host = storageEmulatorTable
- case queueServiceName:
- host = storageEmulatorQueue
- }
- } else {
- host = fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL)
- }
-
- return &url.URL{
- Scheme: scheme,
- Host: host,
- }
-}
-
-func (c Client) getEndpoint(service, path string, params url.Values) string {
- u := c.getBaseURL(service)
-
- // API doesn't accept path segments not starting with '/'
- if !strings.HasPrefix(path, "/") {
- path = fmt.Sprintf("/%v", path)
- }
-
- if c.accountName == StorageEmulatorAccountName {
- path = fmt.Sprintf("/%v%v", StorageEmulatorAccountName, path)
- }
-
- u.Path = path
- u.RawQuery = params.Encode()
- return u.String()
-}
-
-// AccountSASTokenOptions includes options for constructing
-// an account SAS token.
-// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
-type AccountSASTokenOptions struct {
- APIVersion string
- Services Services
- ResourceTypes ResourceTypes
- Permissions Permissions
- Start time.Time
- Expiry time.Time
- IP string
- UseHTTPS bool
-}
-
-// Services specify services accessible with an account SAS.
-type Services struct {
- Blob bool
- Queue bool
- Table bool
- File bool
-}
-
-// ResourceTypes specify the resources accesible with an
-// account SAS.
-type ResourceTypes struct {
- Service bool
- Container bool
- Object bool
-}
-
-// Permissions specifies permissions for an accountSAS.
-type Permissions struct {
- Read bool
- Write bool
- Delete bool
- List bool
- Add bool
- Create bool
- Update bool
- Process bool
-}
-
-// GetAccountSASToken creates an account SAS token
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
-func (c Client) GetAccountSASToken(options AccountSASTokenOptions) (url.Values, error) {
- if options.APIVersion == "" {
- options.APIVersion = c.apiVersion
- }
-
- if options.APIVersion < "2015-04-05" {
- return url.Values{}, fmt.Errorf("account SAS does not support API versions prior to 2015-04-05. API version : %s", options.APIVersion)
- }
-
- // build services string
- services := ""
- if options.Services.Blob {
- services += "b"
- }
- if options.Services.Queue {
- services += "q"
- }
- if options.Services.Table {
- services += "t"
- }
- if options.Services.File {
- services += "f"
- }
-
- // build resources string
- resources := ""
- if options.ResourceTypes.Service {
- resources += "s"
- }
- if options.ResourceTypes.Container {
- resources += "c"
- }
- if options.ResourceTypes.Object {
- resources += "o"
- }
-
- // build permissions string
- permissions := ""
- if options.Permissions.Read {
- permissions += "r"
- }
- if options.Permissions.Write {
- permissions += "w"
- }
- if options.Permissions.Delete {
- permissions += "d"
- }
- if options.Permissions.List {
- permissions += "l"
- }
- if options.Permissions.Add {
- permissions += "a"
- }
- if options.Permissions.Create {
- permissions += "c"
- }
- if options.Permissions.Update {
- permissions += "u"
- }
- if options.Permissions.Process {
- permissions += "p"
- }
-
- // build start time, if exists
- start := ""
- if options.Start != (time.Time{}) {
- start = options.Start.UTC().Format(time.RFC3339)
- }
-
- // build expiry time
- expiry := options.Expiry.UTC().Format(time.RFC3339)
-
- protocol := "https,http"
- if options.UseHTTPS {
- protocol = "https"
- }
-
- stringToSign := strings.Join([]string{
- c.accountName,
- permissions,
- services,
- resources,
- start,
- expiry,
- options.IP,
- protocol,
- options.APIVersion,
- "",
- }, "\n")
- signature := c.computeHmac256(stringToSign)
-
- sasParams := url.Values{
- "sv": {options.APIVersion},
- "ss": {services},
- "srt": {resources},
- "sp": {permissions},
- "se": {expiry},
- "spr": {protocol},
- "sig": {signature},
- }
- if start != "" {
- sasParams.Add("st", start)
- }
- if options.IP != "" {
- sasParams.Add("sip", options.IP)
- }
-
- return sasParams, nil
-}
-
-// GetBlobService returns a BlobStorageClient which can operate on the blob
-// service of the storage account.
-func (c Client) GetBlobService() BlobStorageClient {
- b := BlobStorageClient{
- client: c,
- }
- b.client.AddToUserAgent(blobServiceName)
- b.auth = sharedKey
- if c.UseSharedKeyLite {
- b.auth = sharedKeyLite
- }
- return b
-}
-
-// GetQueueService returns a QueueServiceClient which can operate on the queue
-// service of the storage account.
-func (c Client) GetQueueService() QueueServiceClient {
- q := QueueServiceClient{
- client: c,
- }
- q.client.AddToUserAgent(queueServiceName)
- q.auth = sharedKey
- if c.UseSharedKeyLite {
- q.auth = sharedKeyLite
- }
- return q
-}
-
-// GetTableService returns a TableServiceClient which can operate on the table
-// service of the storage account.
-func (c Client) GetTableService() TableServiceClient {
- t := TableServiceClient{
- client: c,
- }
- t.client.AddToUserAgent(tableServiceName)
- t.auth = sharedKeyForTable
- if c.UseSharedKeyLite {
- t.auth = sharedKeyLiteForTable
- }
- return t
-}
-
-// GetFileService returns a FileServiceClient which can operate on the file
-// service of the storage account.
-func (c Client) GetFileService() FileServiceClient {
- f := FileServiceClient{
- client: c,
- }
- f.client.AddToUserAgent(fileServiceName)
- f.auth = sharedKey
- if c.UseSharedKeyLite {
- f.auth = sharedKeyLite
- }
- return f
-}
-
-func (c Client) getStandardHeaders() map[string]string {
- headers := map[string]string{}
- for k, v := range c.additionalHeaders {
- headers[k] = v
- }
-
- headers[userAgentHeader] = c.userAgent
- headers["x-ms-version"] = c.apiVersion
- headers["x-ms-date"] = currentTimeRfc1123Formatted()
-
- return headers
-}
-
-func (c Client) exec(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*http.Response, error) {
- headers, err := c.addAuthorizationHeader(verb, url, headers, auth)
- if err != nil {
- return nil, err
- }
-
- req, err := http.NewRequest(verb, url, body)
- if err != nil {
- return nil, errors.New("azure/storage: error creating request: " + err.Error())
- }
-
- // http.NewRequest() will automatically set req.ContentLength for a handful of types
- // otherwise we will handle here.
- if req.ContentLength < 1 {
- if clstr, ok := headers["Content-Length"]; ok {
- if cl, err := strconv.ParseInt(clstr, 10, 64); err == nil {
- req.ContentLength = cl
- }
- }
- }
-
- for k, v := range headers {
- req.Header[k] = append(req.Header[k], v) // Must bypass case munging present in `Add` by using map functions directly. See https://github.com/Azure/azure-sdk-for-go/issues/645
- }
-
- if c.isAccountSASClient() {
- // append the SAS token to the query params
- v := req.URL.Query()
- v = mergeParams(v, c.accountSASToken)
- req.URL.RawQuery = v.Encode()
- }
-
- resp, err := c.Sender.Send(&c, req)
- if err != nil {
- return nil, err
- }
-
- if resp.StatusCode >= 400 && resp.StatusCode <= 505 {
- return resp, getErrorFromResponse(resp)
- }
-
- return resp, nil
-}
-
-func (c Client) execInternalJSONCommon(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, *http.Request, *http.Response, error) {
- headers, err := c.addAuthorizationHeader(verb, url, headers, auth)
- if err != nil {
- return nil, nil, nil, err
- }
-
- req, err := http.NewRequest(verb, url, body)
- for k, v := range headers {
- req.Header.Add(k, v)
- }
-
- resp, err := c.Sender.Send(&c, req)
- if err != nil {
- return nil, nil, nil, err
- }
-
- respToRet := &odataResponse{resp: resp}
-
- statusCode := resp.StatusCode
- if statusCode >= 400 && statusCode <= 505 {
- var respBody []byte
- respBody, err = readAndCloseBody(resp.Body)
- if err != nil {
- return nil, nil, nil, err
- }
-
- requestID, date, version := getDebugHeaders(resp.Header)
- if len(respBody) == 0 {
- // no error in response body, might happen in HEAD requests
- err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version)
- return respToRet, req, resp, err
- }
- // response contains storage service error object, unmarshal
- if resp.Header.Get("Content-Type") == "application/xml" {
- storageErr := AzureTablesServiceError{
- StatusCode: resp.StatusCode,
- RequestID: requestID,
- Date: date,
- APIVersion: version,
- }
- if err := xml.Unmarshal(respBody, &storageErr); err != nil {
- storageErr.Message = fmt.Sprintf("Response body could no be unmarshaled: %v. Body: %v.", err, string(respBody))
- }
- err = storageErr
- } else {
- err = json.Unmarshal(respBody, &respToRet.odata)
- }
- }
-
- return respToRet, req, resp, err
-}
-
-func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) {
- respToRet, _, _, err := c.execInternalJSONCommon(verb, url, headers, body, auth)
- return respToRet, err
-}
-
-func (c Client) execBatchOperationJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) {
- // execute common query, get back generated request, response etc... for more processing.
- respToRet, req, resp, err := c.execInternalJSONCommon(verb, url, headers, body, auth)
- if err != nil {
- return nil, err
- }
-
- // return the OData in the case of executing batch commands.
- // In this case we need to read the outer batch boundary and contents.
- // Then we read the changeset information within the batch
- var respBody []byte
- respBody, err = readAndCloseBody(resp.Body)
- if err != nil {
- return nil, err
- }
-
- // outer multipart body
- _, batchHeader, err := mime.ParseMediaType(resp.Header["Content-Type"][0])
- if err != nil {
- return nil, err
- }
-
- // batch details.
- batchBoundary := batchHeader["boundary"]
- batchPartBuf, changesetBoundary, err := genBatchReader(batchBoundary, respBody)
- if err != nil {
- return nil, err
- }
-
- // changeset details.
- err = genChangesetReader(req, respToRet, batchPartBuf, changesetBoundary)
- if err != nil {
- return nil, err
- }
-
- return respToRet, nil
-}
-
-func genChangesetReader(req *http.Request, respToRet *odataResponse, batchPartBuf io.Reader, changesetBoundary string) error {
- changesetMultiReader := multipart.NewReader(batchPartBuf, changesetBoundary)
- changesetPart, err := changesetMultiReader.NextPart()
- if err != nil {
- return err
- }
-
- changesetPartBufioReader := bufio.NewReader(changesetPart)
- changesetResp, err := http.ReadResponse(changesetPartBufioReader, req)
- if err != nil {
- return err
- }
-
- if changesetResp.StatusCode != http.StatusNoContent {
- changesetBody, err := readAndCloseBody(changesetResp.Body)
- err = json.Unmarshal(changesetBody, &respToRet.odata)
- if err != nil {
- return err
- }
- respToRet.resp = changesetResp
- }
-
- return nil
-}
-
-func genBatchReader(batchBoundary string, respBody []byte) (io.Reader, string, error) {
- respBodyString := string(respBody)
- respBodyReader := strings.NewReader(respBodyString)
-
- // reading batchresponse
- batchMultiReader := multipart.NewReader(respBodyReader, batchBoundary)
- batchPart, err := batchMultiReader.NextPart()
- if err != nil {
- return nil, "", err
- }
- batchPartBufioReader := bufio.NewReader(batchPart)
-
- _, changesetHeader, err := mime.ParseMediaType(batchPart.Header.Get("Content-Type"))
- if err != nil {
- return nil, "", err
- }
- changesetBoundary := changesetHeader["boundary"]
- return batchPartBufioReader, changesetBoundary, nil
-}
-
-func readAndCloseBody(body io.ReadCloser) ([]byte, error) {
- defer body.Close()
- out, err := ioutil.ReadAll(body)
- if err == io.EOF {
- err = nil
- }
- return out, err
-}
-
-// reads the response body then closes it
-func drainRespBody(resp *http.Response) {
- if resp != nil {
- io.Copy(ioutil.Discard, resp.Body)
- resp.Body.Close()
- }
-}
-
-func serviceErrFromXML(body []byte, storageErr *AzureStorageServiceError) error {
- if err := xml.Unmarshal(body, storageErr); err != nil {
- storageErr.Message = fmt.Sprintf("Response body could no be unmarshaled: %v. Body: %v.", err, string(body))
- return err
- }
- return nil
-}
-
-func serviceErrFromJSON(body []byte, storageErr *AzureStorageServiceError) error {
- odataError := odataErrorWrapper{}
- if err := json.Unmarshal(body, &odataError); err != nil {
- storageErr.Message = fmt.Sprintf("Response body could no be unmarshaled: %v. Body: %v.", err, string(body))
- return err
- }
- storageErr.Code = odataError.Err.Code
- storageErr.Message = odataError.Err.Message.Value
- storageErr.Lang = odataError.Err.Message.Lang
- return nil
-}
-
-func serviceErrFromStatusCode(code int, status string, requestID, date, version string) AzureStorageServiceError {
- return AzureStorageServiceError{
- StatusCode: code,
- Code: status,
- RequestID: requestID,
- Date: date,
- APIVersion: version,
- Message: "no response body was available for error status code",
- }
-}
-
-func (e AzureStorageServiceError) Error() string {
- return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestInitiated=%s, RequestId=%s, API Version=%s, QueryParameterName=%s, QueryParameterValue=%s",
- e.StatusCode, e.Code, e.Message, e.Date, e.RequestID, e.APIVersion, e.QueryParameterName, e.QueryParameterValue)
-}
-
-// checkRespCode returns UnexpectedStatusError if the given response code is not
-// one of the allowed status codes; otherwise nil.
-func checkRespCode(resp *http.Response, allowed []int) error {
- for _, v := range allowed {
- if resp.StatusCode == v {
- return nil
- }
- }
- err := getErrorFromResponse(resp)
- return UnexpectedStatusCodeError{
- allowed: allowed,
- got: resp.StatusCode,
- inner: err,
- }
-}
-
-func (c Client) addMetadataToHeaders(h map[string]string, metadata map[string]string) map[string]string {
- metadata = c.protectUserAgent(metadata)
- for k, v := range metadata {
- h[userDefinedMetadataHeaderPrefix+k] = v
- }
- return h
-}
-
-func getDebugHeaders(h http.Header) (requestID, date, version string) {
- requestID = h.Get("x-ms-request-id")
- version = h.Get("x-ms-version")
- date = h.Get("Date")
- return
-}
-
-func getErrorFromResponse(resp *http.Response) error {
- respBody, err := readAndCloseBody(resp.Body)
- if err != nil {
- return err
- }
-
- requestID, date, version := getDebugHeaders(resp.Header)
- if len(respBody) == 0 {
- // no error in response body, might happen in HEAD requests
- err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version)
- } else {
- storageErr := AzureStorageServiceError{
- StatusCode: resp.StatusCode,
- RequestID: requestID,
- Date: date,
- APIVersion: version,
- }
- // response contains storage service error object, unmarshal
- if resp.Header.Get("Content-Type") == "application/xml" {
- errIn := serviceErrFromXML(respBody, &storageErr)
- if err != nil { // error unmarshaling the error response
- err = errIn
- }
- } else {
- errIn := serviceErrFromJSON(respBody, &storageErr)
- if err != nil { // error unmarshaling the error response
- err = errIn
- }
- }
- err = storageErr
- }
- return err
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go
deleted file mode 100644
index a203fce8d4..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-import (
- "net/url"
- "time"
-)
-
-// SASOptions includes options used by SAS URIs for different
-// services and resources.
-type SASOptions struct {
- APIVersion string
- Start time.Time
- Expiry time.Time
- IP string
- UseHTTPS bool
- Identifier string
-}
-
-func addQueryParameter(query url.Values, key, value string) url.Values {
- if value != "" {
- query.Add(key, value)
- }
- return query
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go
deleted file mode 100644
index ae2862c868..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go
+++ /dev/null
@@ -1,629 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-import (
- "encoding/xml"
- "fmt"
- "io"
- "net/http"
- "net/url"
- "strconv"
- "strings"
- "time"
-)
-
-// Container represents an Azure container.
-type Container struct {
- bsc *BlobStorageClient
- Name string `xml:"Name"`
- Properties ContainerProperties `xml:"Properties"`
- Metadata map[string]string
- sasuri url.URL
-}
-
-// Client returns the HTTP client used by the Container reference.
-func (c *Container) Client() *Client {
- return &c.bsc.client
-}
-
-func (c *Container) buildPath() string {
- return fmt.Sprintf("/%s", c.Name)
-}
-
-// GetURL gets the canonical URL to the container.
-// This method does not create a publicly accessible URL if the container
-// is private and this method does not check if the blob exists.
-func (c *Container) GetURL() string {
- container := c.Name
- if container == "" {
- container = "$root"
- }
- return c.bsc.client.getEndpoint(blobServiceName, pathForResource(container, ""), nil)
-}
-
-// ContainerSASOptions are options to construct a container SAS
-// URI.
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
-type ContainerSASOptions struct {
- ContainerSASPermissions
- OverrideHeaders
- SASOptions
-}
-
-// ContainerSASPermissions includes the available permissions for
-// a container SAS URI.
-type ContainerSASPermissions struct {
- BlobServiceSASPermissions
- List bool
-}
-
-// GetSASURI creates an URL to the container which contains the Shared
-// Access Signature with the specified options.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
-func (c *Container) GetSASURI(options ContainerSASOptions) (string, error) {
- uri := c.GetURL()
- signedResource := "c"
- canonicalizedResource, err := c.bsc.client.buildCanonicalizedResource(uri, c.bsc.auth, true)
- if err != nil {
- return "", err
- }
-
- // build permissions string
- permissions := options.BlobServiceSASPermissions.buildString()
- if options.List {
- permissions += "l"
- }
-
- return c.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders)
-}
-
-// ContainerProperties contains various properties of a container returned from
-// various endpoints like ListContainers.
-type ContainerProperties struct {
- LastModified string `xml:"Last-Modified"`
- Etag string `xml:"Etag"`
- LeaseStatus string `xml:"LeaseStatus"`
- LeaseState string `xml:"LeaseState"`
- LeaseDuration string `xml:"LeaseDuration"`
- PublicAccess ContainerAccessType `xml:"PublicAccess"`
-}
-
-// ContainerListResponse contains the response fields from
-// ListContainers call.
-//
-// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
-type ContainerListResponse struct {
- XMLName xml.Name `xml:"EnumerationResults"`
- Xmlns string `xml:"xmlns,attr"`
- Prefix string `xml:"Prefix"`
- Marker string `xml:"Marker"`
- NextMarker string `xml:"NextMarker"`
- MaxResults int64 `xml:"MaxResults"`
- Containers []Container `xml:"Containers>Container"`
-}
-
-// BlobListResponse contains the response fields from ListBlobs call.
-//
-// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
-type BlobListResponse struct {
- XMLName xml.Name `xml:"EnumerationResults"`
- Xmlns string `xml:"xmlns,attr"`
- Prefix string `xml:"Prefix"`
- Marker string `xml:"Marker"`
- NextMarker string `xml:"NextMarker"`
- MaxResults int64 `xml:"MaxResults"`
- Blobs []Blob `xml:"Blobs>Blob"`
-
- // BlobPrefix is used to traverse blobs as if it were a file system.
- // It is returned if ListBlobsParameters.Delimiter is specified.
- // The list here can be thought of as "folders" that may contain
- // other folders or blobs.
- BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"`
-
- // Delimiter is used to traverse blobs as if it were a file system.
- // It is returned if ListBlobsParameters.Delimiter is specified.
- Delimiter string `xml:"Delimiter"`
-}
-
-// IncludeBlobDataset has options to include in a list blobs operation
-type IncludeBlobDataset struct {
- Snapshots bool
- Metadata bool
- UncommittedBlobs bool
- Copy bool
-}
-
-// ListBlobsParameters defines the set of customizable
-// parameters to make a List Blobs call.
-//
-// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
-type ListBlobsParameters struct {
- Prefix string
- Delimiter string
- Marker string
- Include *IncludeBlobDataset
- MaxResults uint
- Timeout uint
- RequestID string
-}
-
-func (p ListBlobsParameters) getParameters() url.Values {
- out := url.Values{}
-
- if p.Prefix != "" {
- out.Set("prefix", p.Prefix)
- }
- if p.Delimiter != "" {
- out.Set("delimiter", p.Delimiter)
- }
- if p.Marker != "" {
- out.Set("marker", p.Marker)
- }
- if p.Include != nil {
- include := []string{}
- include = addString(include, p.Include.Snapshots, "snapshots")
- include = addString(include, p.Include.Metadata, "metadata")
- include = addString(include, p.Include.UncommittedBlobs, "uncommittedblobs")
- include = addString(include, p.Include.Copy, "copy")
- fullInclude := strings.Join(include, ",")
- out.Set("include", fullInclude)
- }
- if p.MaxResults != 0 {
- out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
- }
- if p.Timeout != 0 {
- out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10))
- }
-
- return out
-}
-
-func addString(datasets []string, include bool, text string) []string {
- if include {
- datasets = append(datasets, text)
- }
- return datasets
-}
-
-// ContainerAccessType defines the access level to the container from a public
-// request.
-//
-// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms-
-// blob-public-access" header.
-type ContainerAccessType string
-
-// Access options for containers
-const (
- ContainerAccessTypePrivate ContainerAccessType = ""
- ContainerAccessTypeBlob ContainerAccessType = "blob"
- ContainerAccessTypeContainer ContainerAccessType = "container"
-)
-
-// ContainerAccessPolicy represents each access policy in the container ACL.
-type ContainerAccessPolicy struct {
- ID string
- StartTime time.Time
- ExpiryTime time.Time
- CanRead bool
- CanWrite bool
- CanDelete bool
-}
-
-// ContainerPermissions represents the container ACLs.
-type ContainerPermissions struct {
- AccessType ContainerAccessType
- AccessPolicies []ContainerAccessPolicy
-}
-
-// ContainerAccessHeader references header used when setting/getting container ACL
-const (
- ContainerAccessHeader string = "x-ms-blob-public-access"
-)
-
-// GetBlobReference returns a Blob object for the specified blob name.
-func (c *Container) GetBlobReference(name string) *Blob {
- return &Blob{
- Container: c,
- Name: name,
- }
-}
-
-// CreateContainerOptions includes the options for a create container operation
-type CreateContainerOptions struct {
- Timeout uint
- Access ContainerAccessType `header:"x-ms-blob-public-access"`
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// Create creates a blob container within the storage account
-// with given name and access level. Returns error if container already exists.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Container
-func (c *Container) Create(options *CreateContainerOptions) error {
- resp, err := c.create(options)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
- return checkRespCode(resp, []int{http.StatusCreated})
-}
-
-// CreateIfNotExists creates a blob container if it does not exist. Returns
-// true if container is newly created or false if container already exists.
-func (c *Container) CreateIfNotExists(options *CreateContainerOptions) (bool, error) {
- resp, err := c.create(options)
- if resp != nil {
- defer drainRespBody(resp)
- if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusConflict {
- return resp.StatusCode == http.StatusCreated, nil
- }
- }
- return false, err
-}
-
-func (c *Container) create(options *CreateContainerOptions) (*http.Response, error) {
- query := url.Values{"restype": {"container"}}
- headers := c.bsc.client.getStandardHeaders()
- headers = c.bsc.client.addMetadataToHeaders(headers, c.Metadata)
-
- if options != nil {
- query = addTimeout(query, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query)
-
- return c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth)
-}
-
-// Exists returns true if a container with given name exists
-// on the storage account, otherwise returns false.
-func (c *Container) Exists() (bool, error) {
- q := url.Values{"restype": {"container"}}
- var uri string
- if c.bsc.client.isServiceSASClient() {
- q = mergeParams(q, c.sasuri.Query())
- newURI := c.sasuri
- newURI.RawQuery = q.Encode()
- uri = newURI.String()
-
- } else {
- uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q)
- }
- headers := c.bsc.client.getStandardHeaders()
-
- resp, err := c.bsc.client.exec(http.MethodHead, uri, headers, nil, c.bsc.auth)
- if resp != nil {
- defer drainRespBody(resp)
- if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound {
- return resp.StatusCode == http.StatusOK, nil
- }
- }
- return false, err
-}
-
-// SetContainerPermissionOptions includes options for a set container permissions operation
-type SetContainerPermissionOptions struct {
- Timeout uint
- LeaseID string `header:"x-ms-lease-id"`
- IfModifiedSince *time.Time `header:"If-Modified-Since"`
- IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// SetPermissions sets up container permissions
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Container-ACL
-func (c *Container) SetPermissions(permissions ContainerPermissions, options *SetContainerPermissionOptions) error {
- body, length, err := generateContainerACLpayload(permissions.AccessPolicies)
- if err != nil {
- return err
- }
- params := url.Values{
- "restype": {"container"},
- "comp": {"acl"},
- }
- headers := c.bsc.client.getStandardHeaders()
- headers = addToHeaders(headers, ContainerAccessHeader, string(permissions.AccessType))
- headers["Content-Length"] = strconv.Itoa(length)
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
-
- resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, body, c.bsc.auth)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
- return checkRespCode(resp, []int{http.StatusOK})
-}
-
-// GetContainerPermissionOptions includes options for a get container permissions operation
-type GetContainerPermissionOptions struct {
- Timeout uint
- LeaseID string `header:"x-ms-lease-id"`
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// GetPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx
-// If timeout is 0 then it will not be passed to Azure
-// leaseID will only be passed to Azure if populated
-func (c *Container) GetPermissions(options *GetContainerPermissionOptions) (*ContainerPermissions, error) {
- params := url.Values{
- "restype": {"container"},
- "comp": {"acl"},
- }
- headers := c.bsc.client.getStandardHeaders()
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
-
- resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
-
- var ap AccessPolicy
- err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList)
- if err != nil {
- return nil, err
- }
- return buildAccessPolicy(ap, &resp.Header), nil
-}
-
-func buildAccessPolicy(ap AccessPolicy, headers *http.Header) *ContainerPermissions {
- // containerAccess. Blob, Container, empty
- containerAccess := headers.Get(http.CanonicalHeaderKey(ContainerAccessHeader))
- permissions := ContainerPermissions{
- AccessType: ContainerAccessType(containerAccess),
- AccessPolicies: []ContainerAccessPolicy{},
- }
-
- for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
- capd := ContainerAccessPolicy{
- ID: policy.ID,
- StartTime: policy.AccessPolicy.StartTime,
- ExpiryTime: policy.AccessPolicy.ExpiryTime,
- }
- capd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
- capd.CanWrite = updatePermissions(policy.AccessPolicy.Permission, "w")
- capd.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d")
-
- permissions.AccessPolicies = append(permissions.AccessPolicies, capd)
- }
- return &permissions
-}
-
-// DeleteContainerOptions includes options for a delete container operation
-type DeleteContainerOptions struct {
- Timeout uint
- LeaseID string `header:"x-ms-lease-id"`
- IfModifiedSince *time.Time `header:"If-Modified-Since"`
- IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// Delete deletes the container with given name on the storage
-// account. If the container does not exist returns error.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container
-func (c *Container) Delete(options *DeleteContainerOptions) error {
- resp, err := c.delete(options)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
- return checkRespCode(resp, []int{http.StatusAccepted})
-}
-
-// DeleteIfExists deletes the container with given name on the storage
-// account if it exists. Returns true if container is deleted with this call, or
-// false if the container did not exist at the time of the Delete Container
-// operation.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container
-func (c *Container) DeleteIfExists(options *DeleteContainerOptions) (bool, error) {
- resp, err := c.delete(options)
- if resp != nil {
- defer drainRespBody(resp)
- if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound {
- return resp.StatusCode == http.StatusAccepted, nil
- }
- }
- return false, err
-}
-
-func (c *Container) delete(options *DeleteContainerOptions) (*http.Response, error) {
- query := url.Values{"restype": {"container"}}
- headers := c.bsc.client.getStandardHeaders()
-
- if options != nil {
- query = addTimeout(query, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query)
-
- return c.bsc.client.exec(http.MethodDelete, uri, headers, nil, c.bsc.auth)
-}
-
-// ListBlobs returns an object that contains list of blobs in the container,
-// pagination token and other information in the response of List Blobs call.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Blobs
-func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) {
- q := mergeParams(params.getParameters(), url.Values{
- "restype": {"container"},
- "comp": {"list"},
- })
- var uri string
- if c.bsc.client.isServiceSASClient() {
- q = mergeParams(q, c.sasuri.Query())
- newURI := c.sasuri
- newURI.RawQuery = q.Encode()
- uri = newURI.String()
- } else {
- uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q)
- }
-
- headers := c.bsc.client.getStandardHeaders()
- headers = addToHeaders(headers, "x-ms-client-request-id", params.RequestID)
-
- var out BlobListResponse
- resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
- if err != nil {
- return out, err
- }
- defer resp.Body.Close()
-
- err = xmlUnmarshal(resp.Body, &out)
- for i := range out.Blobs {
- out.Blobs[i].Container = c
- }
- return out, err
-}
-
-// ContainerMetadataOptions includes options for container metadata operations
-type ContainerMetadataOptions struct {
- Timeout uint
- LeaseID string `header:"x-ms-lease-id"`
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// SetMetadata replaces the metadata for the specified container.
-//
-// Some keys may be converted to Camel-Case before sending. All keys
-// are returned in lower case by GetBlobMetadata. HTTP header names
-// are case-insensitive so case munging should not matter to other
-// applications either.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/set-container-metadata
-func (c *Container) SetMetadata(options *ContainerMetadataOptions) error {
- params := url.Values{
- "comp": {"metadata"},
- "restype": {"container"},
- }
- headers := c.bsc.client.getStandardHeaders()
- headers = c.bsc.client.addMetadataToHeaders(headers, c.Metadata)
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
-
- uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
-
- resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
- return checkRespCode(resp, []int{http.StatusOK})
-}
-
-// GetMetadata returns all user-defined metadata for the specified container.
-//
-// All metadata keys will be returned in lower case. (HTTP header
-// names are case-insensitive.)
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-metadata
-func (c *Container) GetMetadata(options *ContainerMetadataOptions) error {
- params := url.Values{
- "comp": {"metadata"},
- "restype": {"container"},
- }
- headers := c.bsc.client.getStandardHeaders()
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
-
- uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
-
- resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
- if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
- return err
- }
-
- c.writeMetadata(resp.Header)
- return nil
-}
-
-func (c *Container) writeMetadata(h http.Header) {
- c.Metadata = writeMetadata(h)
-}
-
-func generateContainerACLpayload(policies []ContainerAccessPolicy) (io.Reader, int, error) {
- sil := SignedIdentifiers{
- SignedIdentifiers: []SignedIdentifier{},
- }
- for _, capd := range policies {
- permission := capd.generateContainerPermissions()
- signedIdentifier := convertAccessPolicyToXMLStructs(capd.ID, capd.StartTime, capd.ExpiryTime, permission)
- sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
- }
- return xmlMarshal(sil)
-}
-
-func (capd *ContainerAccessPolicy) generateContainerPermissions() (permissions string) {
- // generate the permissions string (rwd).
- // still want the end user API to have bool flags.
- permissions = ""
-
- if capd.CanRead {
- permissions += "r"
- }
-
- if capd.CanWrite {
- permissions += "w"
- }
-
- if capd.CanDelete {
- permissions += "d"
- }
-
- return permissions
-}
-
-// GetProperties updated the properties of the container.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-properties
-func (c *Container) GetProperties() error {
- params := url.Values{
- "restype": {"container"},
- }
- headers := c.bsc.client.getStandardHeaders()
-
- uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
-
- resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
- if err != nil {
- return err
- }
- defer resp.Body.Close()
- if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
- return err
- }
-
- // update properties
- c.Properties.Etag = resp.Header.Get(headerEtag)
- c.Properties.LeaseStatus = resp.Header.Get("x-ms-lease-status")
- c.Properties.LeaseState = resp.Header.Get("x-ms-lease-state")
- c.Properties.LeaseDuration = resp.Header.Get("x-ms-lease-duration")
- c.Properties.LastModified = resp.Header.Get("Last-Modified")
- c.Properties.PublicAccess = ContainerAccessType(resp.Header.Get(ContainerAccessHeader))
-
- return nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go
deleted file mode 100644
index 3696e804fe..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go
+++ /dev/null
@@ -1,226 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-import (
- "errors"
- "fmt"
- "net/http"
- "net/url"
- "strings"
- "time"
-)
-
-const (
- blobCopyStatusPending = "pending"
- blobCopyStatusSuccess = "success"
- blobCopyStatusAborted = "aborted"
- blobCopyStatusFailed = "failed"
-)
-
-// CopyOptions includes the options for a copy blob operation
-type CopyOptions struct {
- Timeout uint
- Source CopyOptionsConditions
- Destiny CopyOptionsConditions
- RequestID string
-}
-
-// IncrementalCopyOptions includes the options for an incremental copy blob operation
-type IncrementalCopyOptions struct {
- Timeout uint
- Destination IncrementalCopyOptionsConditions
- RequestID string
-}
-
-// CopyOptionsConditions includes some conditional options in a copy blob operation
-type CopyOptionsConditions struct {
- LeaseID string
- IfModifiedSince *time.Time
- IfUnmodifiedSince *time.Time
- IfMatch string
- IfNoneMatch string
-}
-
-// IncrementalCopyOptionsConditions includes some conditional options in a copy blob operation
-type IncrementalCopyOptionsConditions struct {
- IfModifiedSince *time.Time
- IfUnmodifiedSince *time.Time
- IfMatch string
- IfNoneMatch string
-}
-
-// Copy starts a blob copy operation and waits for the operation to
-// complete. sourceBlob parameter must be a canonical URL to the blob (can be
-// obtained using the GetURL method.) There is no SLA on blob copy and therefore
-// this helper method works faster on smaller files.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob
-func (b *Blob) Copy(sourceBlob string, options *CopyOptions) error {
- copyID, err := b.StartCopy(sourceBlob, options)
- if err != nil {
- return err
- }
-
- return b.WaitForCopy(copyID)
-}
-
-// StartCopy starts a blob copy operation.
-// sourceBlob parameter must be a canonical URL to the blob (can be
-// obtained using the GetURL method.)
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob
-func (b *Blob) StartCopy(sourceBlob string, options *CopyOptions) (string, error) {
- params := url.Values{}
- headers := b.Container.bsc.client.getStandardHeaders()
- headers["x-ms-copy-source"] = sourceBlob
- headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
- // source
- headers = addToHeaders(headers, "x-ms-source-lease-id", options.Source.LeaseID)
- headers = addTimeToHeaders(headers, "x-ms-source-if-modified-since", options.Source.IfModifiedSince)
- headers = addTimeToHeaders(headers, "x-ms-source-if-unmodified-since", options.Source.IfUnmodifiedSince)
- headers = addToHeaders(headers, "x-ms-source-if-match", options.Source.IfMatch)
- headers = addToHeaders(headers, "x-ms-source-if-none-match", options.Source.IfNoneMatch)
- //destiny
- headers = addToHeaders(headers, "x-ms-lease-id", options.Destiny.LeaseID)
- headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destiny.IfModifiedSince)
- headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destiny.IfUnmodifiedSince)
- headers = addToHeaders(headers, "x-ms-if-match", options.Destiny.IfMatch)
- headers = addToHeaders(headers, "x-ms-if-none-match", options.Destiny.IfNoneMatch)
- }
- uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
-
- resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
- if err != nil {
- return "", err
- }
- defer drainRespBody(resp)
-
- if err := checkRespCode(resp, []int{http.StatusAccepted, http.StatusCreated}); err != nil {
- return "", err
- }
-
- copyID := resp.Header.Get("x-ms-copy-id")
- if copyID == "" {
- return "", errors.New("Got empty copy id header")
- }
- return copyID, nil
-}
-
-// AbortCopyOptions includes the options for an abort blob operation
-type AbortCopyOptions struct {
- Timeout uint
- LeaseID string `header:"x-ms-lease-id"`
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// AbortCopy aborts a BlobCopy which has already been triggered by the StartBlobCopy function.
-// copyID is generated from StartBlobCopy function.
-// currentLeaseID is required IF the destination blob has an active lease on it.
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Abort-Copy-Blob
-func (b *Blob) AbortCopy(copyID string, options *AbortCopyOptions) error {
- params := url.Values{
- "comp": {"copy"},
- "copyid": {copyID},
- }
- headers := b.Container.bsc.client.getStandardHeaders()
- headers["x-ms-copy-action"] = "abort"
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
-
- resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
- return checkRespCode(resp, []int{http.StatusNoContent})
-}
-
-// WaitForCopy loops until a BlobCopy operation is completed (or fails with error)
-func (b *Blob) WaitForCopy(copyID string) error {
- for {
- err := b.GetProperties(nil)
- if err != nil {
- return err
- }
-
- if b.Properties.CopyID != copyID {
- return errBlobCopyIDMismatch
- }
-
- switch b.Properties.CopyStatus {
- case blobCopyStatusSuccess:
- return nil
- case blobCopyStatusPending:
- continue
- case blobCopyStatusAborted:
- return errBlobCopyAborted
- case blobCopyStatusFailed:
- return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", b.Properties.CopyID, b.Properties.CopyStatusDescription)
- default:
- return fmt.Errorf("storage: unhandled blob copy status: '%s'", b.Properties.CopyStatus)
- }
- }
-}
-
-// IncrementalCopyBlob copies a snapshot of a source blob and copies to referring blob
-// sourceBlob parameter must be a valid snapshot URL of the original blob.
-// THe original blob mut be public, or use a Shared Access Signature.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/incremental-copy-blob .
-func (b *Blob) IncrementalCopyBlob(sourceBlobURL string, snapshotTime time.Time, options *IncrementalCopyOptions) (string, error) {
- params := url.Values{"comp": {"incrementalcopy"}}
-
- // need formatting to 7 decimal places so it's friendly to Windows and *nix
- snapshotTimeFormatted := snapshotTime.Format("2006-01-02T15:04:05.0000000Z")
- u, err := url.Parse(sourceBlobURL)
- if err != nil {
- return "", err
- }
- query := u.Query()
- query.Add("snapshot", snapshotTimeFormatted)
- encodedQuery := query.Encode()
- encodedQuery = strings.Replace(encodedQuery, "%3A", ":", -1)
- u.RawQuery = encodedQuery
- snapshotURL := u.String()
-
- headers := b.Container.bsc.client.getStandardHeaders()
- headers["x-ms-copy-source"] = snapshotURL
-
- if options != nil {
- addTimeout(params, options.Timeout)
- headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
- headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destination.IfModifiedSince)
- headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destination.IfUnmodifiedSince)
- headers = addToHeaders(headers, "x-ms-if-match", options.Destination.IfMatch)
- headers = addToHeaders(headers, "x-ms-if-none-match", options.Destination.IfNoneMatch)
- }
-
- // get URI of destination blob
- uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
-
- resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
- if err != nil {
- return "", err
- }
- defer drainRespBody(resp)
-
- if err := checkRespCode(resp, []int{http.StatusAccepted}); err != nil {
- return "", err
- }
-
- copyID := resp.Header.Get("x-ms-copy-id")
- if copyID == "" {
- return "", errors.New("Got empty copy id header")
- }
- return copyID, nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go
deleted file mode 100644
index 498e9837c5..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go
+++ /dev/null
@@ -1,227 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-import (
- "encoding/xml"
- "net/http"
- "net/url"
- "sync"
-)
-
-// Directory represents a directory on a share.
-type Directory struct {
- fsc *FileServiceClient
- Metadata map[string]string
- Name string `xml:"Name"`
- parent *Directory
- Properties DirectoryProperties
- share *Share
-}
-
-// DirectoryProperties contains various properties of a directory.
-type DirectoryProperties struct {
- LastModified string `xml:"Last-Modified"`
- Etag string `xml:"Etag"`
-}
-
-// ListDirsAndFilesParameters defines the set of customizable parameters to
-// make a List Files and Directories call.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
-type ListDirsAndFilesParameters struct {
- Prefix string
- Marker string
- MaxResults uint
- Timeout uint
-}
-
-// DirsAndFilesListResponse contains the response fields from
-// a List Files and Directories call.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
-type DirsAndFilesListResponse struct {
- XMLName xml.Name `xml:"EnumerationResults"`
- Xmlns string `xml:"xmlns,attr"`
- Marker string `xml:"Marker"`
- MaxResults int64 `xml:"MaxResults"`
- Directories []Directory `xml:"Entries>Directory"`
- Files []File `xml:"Entries>File"`
- NextMarker string `xml:"NextMarker"`
-}
-
-// builds the complete directory path for this directory object.
-func (d *Directory) buildPath() string {
- path := ""
- current := d
- for current.Name != "" {
- path = "/" + current.Name + path
- current = current.parent
- }
- return d.share.buildPath() + path
-}
-
-// Create this directory in the associated share.
-// If a directory with the same name already exists, the operation fails.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory
-func (d *Directory) Create(options *FileRequestOptions) error {
- // if this is the root directory exit early
- if d.parent == nil {
- return nil
- }
-
- params := prepareOptions(options)
- headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, params, mergeMDIntoExtraHeaders(d.Metadata, nil), []int{http.StatusCreated})
- if err != nil {
- return err
- }
-
- d.updateEtagAndLastModified(headers)
- return nil
-}
-
-// CreateIfNotExists creates this directory under the associated share if the
-// directory does not exist. Returns true if the directory is newly created or
-// false if the directory already exists.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory
-func (d *Directory) CreateIfNotExists(options *FileRequestOptions) (bool, error) {
- // if this is the root directory exit early
- if d.parent == nil {
- return false, nil
- }
-
- params := prepareOptions(options)
- resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, params, nil)
- if resp != nil {
- defer drainRespBody(resp)
- if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusConflict {
- if resp.StatusCode == http.StatusCreated {
- d.updateEtagAndLastModified(resp.Header)
- return true, nil
- }
-
- return false, d.FetchAttributes(nil)
- }
- }
-
- return false, err
-}
-
-// Delete removes this directory. It must be empty in order to be deleted.
-// If the directory does not exist the operation fails.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory
-func (d *Directory) Delete(options *FileRequestOptions) error {
- return d.fsc.deleteResource(d.buildPath(), resourceDirectory, options)
-}
-
-// DeleteIfExists removes this directory if it exists.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory
-func (d *Directory) DeleteIfExists(options *FileRequestOptions) (bool, error) {
- resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory, options)
- if resp != nil {
- defer drainRespBody(resp)
- if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound {
- return resp.StatusCode == http.StatusAccepted, nil
- }
- }
- return false, err
-}
-
-// Exists returns true if this directory exists.
-func (d *Directory) Exists() (bool, error) {
- exists, headers, err := d.fsc.resourceExists(d.buildPath(), resourceDirectory)
- if exists {
- d.updateEtagAndLastModified(headers)
- }
- return exists, err
-}
-
-// FetchAttributes retrieves metadata for this directory.
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-directory-properties
-func (d *Directory) FetchAttributes(options *FileRequestOptions) error {
- params := prepareOptions(options)
- headers, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, params, http.MethodHead)
- if err != nil {
- return err
- }
-
- d.updateEtagAndLastModified(headers)
- d.Metadata = getMetadataFromHeaders(headers)
-
- return nil
-}
-
-// GetDirectoryReference returns a child Directory object for this directory.
-func (d *Directory) GetDirectoryReference(name string) *Directory {
- return &Directory{
- fsc: d.fsc,
- Name: name,
- parent: d,
- share: d.share,
- }
-}
-
-// GetFileReference returns a child File object for this directory.
-func (d *Directory) GetFileReference(name string) *File {
- return &File{
- fsc: d.fsc,
- Name: name,
- parent: d,
- share: d.share,
- mutex: &sync.Mutex{},
- }
-}
-
-// ListDirsAndFiles returns a list of files and directories under this directory.
-// It also contains a pagination token and other response details.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
-func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAndFilesListResponse, error) {
- q := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory))
-
- resp, err := d.fsc.listContent(d.buildPath(), q, nil)
- if err != nil {
- return nil, err
- }
-
- defer resp.Body.Close()
- var out DirsAndFilesListResponse
- err = xmlUnmarshal(resp.Body, &out)
- return &out, err
-}
-
-// SetMetadata replaces the metadata for this directory.
-//
-// Some keys may be converted to Camel-Case before sending. All keys
-// are returned in lower case by GetDirectoryMetadata. HTTP header names
-// are case-insensitive so case munging should not matter to other
-// applications either.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Directory-Metadata
-func (d *Directory) SetMetadata(options *FileRequestOptions) error {
- headers, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil), options)
- if err != nil {
- return err
- }
-
- d.updateEtagAndLastModified(headers)
- return nil
-}
-
-// updates Etag and last modified date
-func (d *Directory) updateEtagAndLastModified(headers http.Header) {
- d.Properties.Etag = headers.Get("Etag")
- d.Properties.LastModified = headers.Get("Last-Modified")
-}
-
-// URL gets the canonical URL to this directory.
-// This method does not create a publicly accessible URL if the directory
-// is private and this method does not check if the directory exists.
-func (d *Directory) URL() string {
- return d.fsc.client.getEndpoint(fileServiceName, d.buildPath(), url.Values{})
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go
deleted file mode 100644
index 9ef63c8dd9..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go
+++ /dev/null
@@ -1,455 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-import (
- "bytes"
- "encoding/base64"
- "encoding/json"
- "errors"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/url"
- "strconv"
- "strings"
- "time"
-
- "github.com/gofrs/uuid"
-)
-
-// Annotating as secure for gas scanning
-/* #nosec */
-const (
- partitionKeyNode = "PartitionKey"
- rowKeyNode = "RowKey"
- etagErrorTemplate = "Etag didn't match: %v"
-)
-
-var (
- errEmptyPayload = errors.New("Empty payload is not a valid metadata level for this operation")
- errNilPreviousResult = errors.New("The previous results page is nil")
- errNilNextLink = errors.New("There are no more pages in this query results")
-)
-
-// Entity represents an entity inside an Azure table.
-type Entity struct {
- Table *Table
- PartitionKey string
- RowKey string
- TimeStamp time.Time
- OdataMetadata string
- OdataType string
- OdataID string
- OdataEtag string
- OdataEditLink string
- Properties map[string]interface{}
-}
-
-// GetEntityReference returns an Entity object with the specified
-// partition key and row key.
-func (t *Table) GetEntityReference(partitionKey, rowKey string) *Entity {
- return &Entity{
- PartitionKey: partitionKey,
- RowKey: rowKey,
- Table: t,
- }
-}
-
-// EntityOptions includes options for entity operations.
-type EntityOptions struct {
- Timeout uint
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// GetEntityOptions includes options for a get entity operation
-type GetEntityOptions struct {
- Select []string
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// Get gets the referenced entity. Which properties to get can be
-// specified using the select option.
-// See:
-// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities
-// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities
-func (e *Entity) Get(timeout uint, ml MetadataLevel, options *GetEntityOptions) error {
- if ml == EmptyPayload {
- return errEmptyPayload
- }
- // RowKey and PartitionKey could be lost if not included in the query
- // As those are the entity identifiers, it is best if they are not lost
- rk := e.RowKey
- pk := e.PartitionKey
-
- query := url.Values{
- "timeout": {strconv.FormatUint(uint64(timeout), 10)},
- }
- headers := e.Table.tsc.client.getStandardHeaders()
- headers[headerAccept] = string(ml)
-
- if options != nil {
- if len(options.Select) > 0 {
- query.Add("$select", strings.Join(options.Select, ","))
- }
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
-
- uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
- resp, err := e.Table.tsc.client.exec(http.MethodGet, uri, headers, nil, e.Table.tsc.auth)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
-
- if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
- return err
- }
-
- respBody, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return err
- }
- err = json.Unmarshal(respBody, e)
- if err != nil {
- return err
- }
- e.PartitionKey = pk
- e.RowKey = rk
-
- return nil
-}
-
-// Insert inserts the referenced entity in its table.
-// The function fails if there is an entity with the same
-// PartitionKey and RowKey in the table.
-// ml determines the level of detail of metadata in the operation response,
-// or no data at all.
-// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-entity
-func (e *Entity) Insert(ml MetadataLevel, options *EntityOptions) error {
- query, headers := options.getParameters()
- headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())
-
- body, err := json.Marshal(e)
- if err != nil {
- return err
- }
- headers = addBodyRelatedHeaders(headers, len(body))
- headers = addReturnContentHeaders(headers, ml)
-
- uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.Table.buildPath(), query)
- resp, err := e.Table.tsc.client.exec(http.MethodPost, uri, headers, bytes.NewReader(body), e.Table.tsc.auth)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
-
- if ml != EmptyPayload {
- if err = checkRespCode(resp, []int{http.StatusCreated}); err != nil {
- return err
- }
- data, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return err
- }
- if err = e.UnmarshalJSON(data); err != nil {
- return err
- }
- } else {
- if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Update updates the contents of an entity. The function fails if there is no entity
-// with the same PartitionKey and RowKey in the table or if the ETag is different
-// than the one in Azure.
-// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/update-entity2
-func (e *Entity) Update(force bool, options *EntityOptions) error {
- return e.updateMerge(force, http.MethodPut, options)
-}
-
-// Merge merges the contents of entity specified with PartitionKey and RowKey
-// with the content specified in Properties.
-// The function fails if there is no entity with the same PartitionKey and
-// RowKey in the table or if the ETag is different than the one in Azure.
-// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/merge-entity
-func (e *Entity) Merge(force bool, options *EntityOptions) error {
- return e.updateMerge(force, "MERGE", options)
-}
-
-// Delete deletes the entity.
-// The function fails if there is no entity with the same PartitionKey and
-// RowKey in the table or if the ETag is different than the one in Azure.
-// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-entity1
-func (e *Entity) Delete(force bool, options *EntityOptions) error {
- query, headers := options.getParameters()
- headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())
-
- headers = addIfMatchHeader(headers, force, e.OdataEtag)
- headers = addReturnContentHeaders(headers, EmptyPayload)
-
- uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
- resp, err := e.Table.tsc.client.exec(http.MethodDelete, uri, headers, nil, e.Table.tsc.auth)
- if err != nil {
- if resp != nil && resp.StatusCode == http.StatusPreconditionFailed {
- return fmt.Errorf(etagErrorTemplate, err)
- }
- return err
- }
- defer drainRespBody(resp)
-
- if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
- return err
- }
-
- return e.updateTimestamp(resp.Header)
-}
-
-// InsertOrReplace inserts an entity or replaces the existing one.
-// Read more: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-or-replace-entity
-func (e *Entity) InsertOrReplace(options *EntityOptions) error {
- return e.insertOr(http.MethodPut, options)
-}
-
-// InsertOrMerge inserts an entity or merges the existing one.
-// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/insert-or-merge-entity
-func (e *Entity) InsertOrMerge(options *EntityOptions) error {
- return e.insertOr("MERGE", options)
-}
-
-func (e *Entity) buildPath() string {
- return fmt.Sprintf("%s(PartitionKey='%s',RowKey='%s')", e.Table.buildPath(), e.PartitionKey, e.RowKey)
-}
-
-// MarshalJSON is a custom marshaller for entity
-func (e *Entity) MarshalJSON() ([]byte, error) {
- completeMap := map[string]interface{}{}
- completeMap[partitionKeyNode] = e.PartitionKey
- completeMap[rowKeyNode] = e.RowKey
- for k, v := range e.Properties {
- typeKey := strings.Join([]string{k, OdataTypeSuffix}, "")
- switch t := v.(type) {
- case []byte:
- completeMap[typeKey] = OdataBinary
- completeMap[k] = t
- case time.Time:
- completeMap[typeKey] = OdataDateTime
- completeMap[k] = t.Format(time.RFC3339Nano)
- case uuid.UUID:
- completeMap[typeKey] = OdataGUID
- completeMap[k] = t.String()
- case int64:
- completeMap[typeKey] = OdataInt64
- completeMap[k] = fmt.Sprintf("%v", v)
- case float32, float64:
- completeMap[typeKey] = OdataDouble
- completeMap[k] = fmt.Sprintf("%v", v)
- default:
- completeMap[k] = v
- }
- if strings.HasSuffix(k, OdataTypeSuffix) {
- if !(completeMap[k] == OdataBinary ||
- completeMap[k] == OdataDateTime ||
- completeMap[k] == OdataGUID ||
- completeMap[k] == OdataInt64 ||
- completeMap[k] == OdataDouble) {
- return nil, fmt.Errorf("Odata.type annotation %v value is not valid", k)
- }
- valueKey := strings.TrimSuffix(k, OdataTypeSuffix)
- if _, ok := completeMap[valueKey]; !ok {
- return nil, fmt.Errorf("Odata.type annotation %v defined without value defined", k)
- }
- }
- }
- return json.Marshal(completeMap)
-}
-
-// UnmarshalJSON is a custom unmarshaller for entities
-func (e *Entity) UnmarshalJSON(data []byte) error {
- errorTemplate := "Deserializing error: %v"
-
- props := map[string]interface{}{}
- err := json.Unmarshal(data, &props)
- if err != nil {
- return err
- }
-
- // deselialize metadata
- e.OdataMetadata = stringFromMap(props, "odata.metadata")
- e.OdataType = stringFromMap(props, "odata.type")
- e.OdataID = stringFromMap(props, "odata.id")
- e.OdataEtag = stringFromMap(props, "odata.etag")
- e.OdataEditLink = stringFromMap(props, "odata.editLink")
- e.PartitionKey = stringFromMap(props, partitionKeyNode)
- e.RowKey = stringFromMap(props, rowKeyNode)
-
- // deserialize timestamp
- timeStamp, ok := props["Timestamp"]
- if ok {
- str, ok := timeStamp.(string)
- if !ok {
- return fmt.Errorf(errorTemplate, "Timestamp casting error")
- }
- t, err := time.Parse(time.RFC3339Nano, str)
- if err != nil {
- return fmt.Errorf(errorTemplate, err)
- }
- e.TimeStamp = t
- }
- delete(props, "Timestamp")
- delete(props, "Timestamp@odata.type")
-
- // deserialize entity (user defined fields)
- for k, v := range props {
- if strings.HasSuffix(k, OdataTypeSuffix) {
- valueKey := strings.TrimSuffix(k, OdataTypeSuffix)
- str, ok := props[valueKey].(string)
- if !ok {
- return fmt.Errorf(errorTemplate, fmt.Sprintf("%v casting error", v))
- }
- switch v {
- case OdataBinary:
- props[valueKey], err = base64.StdEncoding.DecodeString(str)
- if err != nil {
- return fmt.Errorf(errorTemplate, err)
- }
- case OdataDateTime:
- t, err := time.Parse("2006-01-02T15:04:05Z", str)
- if err != nil {
- return fmt.Errorf(errorTemplate, err)
- }
- props[valueKey] = t
- case OdataGUID:
- props[valueKey] = uuid.FromStringOrNil(str)
- case OdataInt64:
- i, err := strconv.ParseInt(str, 10, 64)
- if err != nil {
- return fmt.Errorf(errorTemplate, err)
- }
- props[valueKey] = i
- case OdataDouble:
- f, err := strconv.ParseFloat(str, 64)
- if err != nil {
- return fmt.Errorf(errorTemplate, err)
- }
- props[valueKey] = f
- default:
- return fmt.Errorf(errorTemplate, fmt.Sprintf("%v is not supported", v))
- }
- delete(props, k)
- }
- }
-
- e.Properties = props
- return nil
-}
-
-func getAndDelete(props map[string]interface{}, key string) interface{} {
- if value, ok := props[key]; ok {
- delete(props, key)
- return value
- }
- return nil
-}
-
-func addIfMatchHeader(h map[string]string, force bool, etag string) map[string]string {
- if force {
- h[headerIfMatch] = "*"
- } else {
- h[headerIfMatch] = etag
- }
- return h
-}
-
-// updates Etag and timestamp
-func (e *Entity) updateEtagAndTimestamp(headers http.Header) error {
- e.OdataEtag = headers.Get(headerEtag)
- return e.updateTimestamp(headers)
-}
-
-func (e *Entity) updateTimestamp(headers http.Header) error {
- str := headers.Get(headerDate)
- t, err := time.Parse(time.RFC1123, str)
- if err != nil {
- return fmt.Errorf("Update timestamp error: %v", err)
- }
- e.TimeStamp = t
- return nil
-}
-
-func (e *Entity) insertOr(verb string, options *EntityOptions) error {
- query, headers := options.getParameters()
- headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())
-
- body, err := json.Marshal(e)
- if err != nil {
- return err
- }
- headers = addBodyRelatedHeaders(headers, len(body))
- headers = addReturnContentHeaders(headers, EmptyPayload)
-
- uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
- resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
-
- if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
- return err
- }
-
- return e.updateEtagAndTimestamp(resp.Header)
-}
-
-func (e *Entity) updateMerge(force bool, verb string, options *EntityOptions) error {
- query, headers := options.getParameters()
- headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())
-
- body, err := json.Marshal(e)
- if err != nil {
- return err
- }
- headers = addBodyRelatedHeaders(headers, len(body))
- headers = addIfMatchHeader(headers, force, e.OdataEtag)
- headers = addReturnContentHeaders(headers, EmptyPayload)
-
- uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
- resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth)
- if err != nil {
- if resp != nil && resp.StatusCode == http.StatusPreconditionFailed {
- return fmt.Errorf(etagErrorTemplate, err)
- }
- return err
- }
- defer drainRespBody(resp)
-
- if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
- return err
- }
-
- return e.updateEtagAndTimestamp(resp.Header)
-}
-
-func stringFromMap(props map[string]interface{}, key string) string {
- value := getAndDelete(props, key)
- if value != nil {
- return value.(string)
- }
- return ""
-}
-
-func (options *EntityOptions) getParameters() (url.Values, map[string]string) {
- query := url.Values{}
- headers := map[string]string{}
- if options != nil {
- query = addTimeout(query, options.Timeout)
- headers = headersFromStruct(*options)
- }
- return query, headers
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go
deleted file mode 100644
index 9848025ccb..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go
+++ /dev/null
@@ -1,473 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-import (
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "strconv"
- "sync"
-)
-
-const fourMB = uint64(4194304)
-const oneTB = uint64(1099511627776)
-
-// Export maximum range and file sizes
-
-// MaxRangeSize defines the maximum size in bytes for a file range.
-const MaxRangeSize = fourMB
-
-// MaxFileSize defines the maximum size in bytes for a file.
-const MaxFileSize = oneTB
-
-// File represents a file on a share.
-type File struct {
- fsc *FileServiceClient
- Metadata map[string]string
- Name string `xml:"Name"`
- parent *Directory
- Properties FileProperties `xml:"Properties"`
- share *Share
- FileCopyProperties FileCopyState
- mutex *sync.Mutex
-}
-
-// FileProperties contains various properties of a file.
-type FileProperties struct {
- CacheControl string `header:"x-ms-cache-control"`
- Disposition string `header:"x-ms-content-disposition"`
- Encoding string `header:"x-ms-content-encoding"`
- Etag string
- Language string `header:"x-ms-content-language"`
- LastModified string
- Length uint64 `xml:"Content-Length" header:"x-ms-content-length"`
- MD5 string `header:"x-ms-content-md5"`
- Type string `header:"x-ms-content-type"`
-}
-
-// FileCopyState contains various properties of a file copy operation.
-type FileCopyState struct {
- CompletionTime string
- ID string `header:"x-ms-copy-id"`
- Progress string
- Source string
- Status string `header:"x-ms-copy-status"`
- StatusDesc string
-}
-
-// FileStream contains file data returned from a call to GetFile.
-type FileStream struct {
- Body io.ReadCloser
- ContentMD5 string
-}
-
-// FileRequestOptions will be passed to misc file operations.
-// Currently just Timeout (in seconds) but could expand.
-type FileRequestOptions struct {
- Timeout uint // timeout duration in seconds.
-}
-
-func prepareOptions(options *FileRequestOptions) url.Values {
- params := url.Values{}
- if options != nil {
- params = addTimeout(params, options.Timeout)
- }
- return params
-}
-
-// FileRanges contains a list of file range information for a file.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
-type FileRanges struct {
- ContentLength uint64
- LastModified string
- ETag string
- FileRanges []FileRange `xml:"Range"`
-}
-
-// FileRange contains range information for a file.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
-type FileRange struct {
- Start uint64 `xml:"Start"`
- End uint64 `xml:"End"`
-}
-
-func (fr FileRange) String() string {
- return fmt.Sprintf("bytes=%d-%d", fr.Start, fr.End)
-}
-
-// builds the complete file path for this file object
-func (f *File) buildPath() string {
- return f.parent.buildPath() + "/" + f.Name
-}
-
-// ClearRange releases the specified range of space in a file.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range
-func (f *File) ClearRange(fileRange FileRange, options *FileRequestOptions) error {
- var timeout *uint
- if options != nil {
- timeout = &options.Timeout
- }
- headers, err := f.modifyRange(nil, fileRange, timeout, nil)
- if err != nil {
- return err
- }
-
- f.updateEtagAndLastModified(headers)
- return nil
-}
-
-// Create creates a new file or replaces an existing one.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-File
-func (f *File) Create(maxSize uint64, options *FileRequestOptions) error {
- if maxSize > oneTB {
- return fmt.Errorf("max file size is 1TB")
- }
- params := prepareOptions(options)
- headers := headersFromStruct(f.Properties)
- headers["x-ms-content-length"] = strconv.FormatUint(maxSize, 10)
- headers["x-ms-type"] = "file"
-
- outputHeaders, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, headers), []int{http.StatusCreated})
- if err != nil {
- return err
- }
-
- f.Properties.Length = maxSize
- f.updateEtagAndLastModified(outputHeaders)
- return nil
-}
-
-// CopyFile operation copied a file/blob from the sourceURL to the path provided.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/copy-file
-func (f *File) CopyFile(sourceURL string, options *FileRequestOptions) error {
- extraHeaders := map[string]string{
- "x-ms-type": "file",
- "x-ms-copy-source": sourceURL,
- }
- params := prepareOptions(options)
-
- headers, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusAccepted})
- if err != nil {
- return err
- }
-
- f.updateEtagAndLastModified(headers)
- f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id")
- f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status")
- return nil
-}
-
-// Delete immediately removes this file from the storage account.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2
-func (f *File) Delete(options *FileRequestOptions) error {
- return f.fsc.deleteResource(f.buildPath(), resourceFile, options)
-}
-
-// DeleteIfExists removes this file if it exists.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2
-func (f *File) DeleteIfExists(options *FileRequestOptions) (bool, error) {
- resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile, options)
- if resp != nil {
- defer drainRespBody(resp)
- if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound {
- return resp.StatusCode == http.StatusAccepted, nil
- }
- }
- return false, err
-}
-
-// GetFileOptions includes options for a get file operation
-type GetFileOptions struct {
- Timeout uint
- GetContentMD5 bool
-}
-
-// DownloadToStream operation downloads the file.
-//
-// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file
-func (f *File) DownloadToStream(options *FileRequestOptions) (io.ReadCloser, error) {
- params := prepareOptions(options)
- resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, nil)
- if err != nil {
- return nil, err
- }
-
- if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
- drainRespBody(resp)
- return nil, err
- }
- return resp.Body, nil
-}
-
-// DownloadRangeToStream operation downloads the specified range of this file with optional MD5 hash.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file
-func (f *File) DownloadRangeToStream(fileRange FileRange, options *GetFileOptions) (fs FileStream, err error) {
- extraHeaders := map[string]string{
- "Range": fileRange.String(),
- }
- params := url.Values{}
- if options != nil {
- if options.GetContentMD5 {
- if isRangeTooBig(fileRange) {
- return fs, fmt.Errorf("must specify a range less than or equal to 4MB when getContentMD5 is true")
- }
- extraHeaders["x-ms-range-get-content-md5"] = "true"
- }
- params = addTimeout(params, options.Timeout)
- }
-
- resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, extraHeaders)
- if err != nil {
- return fs, err
- }
-
- if err = checkRespCode(resp, []int{http.StatusOK, http.StatusPartialContent}); err != nil {
- drainRespBody(resp)
- return fs, err
- }
-
- fs.Body = resp.Body
- if options != nil && options.GetContentMD5 {
- fs.ContentMD5 = resp.Header.Get("Content-MD5")
- }
- return fs, nil
-}
-
-// Exists returns true if this file exists.
-func (f *File) Exists() (bool, error) {
- exists, headers, err := f.fsc.resourceExists(f.buildPath(), resourceFile)
- if exists {
- f.updateEtagAndLastModified(headers)
- f.updateProperties(headers)
- }
- return exists, err
-}
-
-// FetchAttributes updates metadata and properties for this file.
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-properties
-func (f *File) FetchAttributes(options *FileRequestOptions) error {
- params := prepareOptions(options)
- headers, err := f.fsc.getResourceHeaders(f.buildPath(), compNone, resourceFile, params, http.MethodHead)
- if err != nil {
- return err
- }
-
- f.updateEtagAndLastModified(headers)
- f.updateProperties(headers)
- f.Metadata = getMetadataFromHeaders(headers)
- return nil
-}
-
-// returns true if the range is larger than 4MB
-func isRangeTooBig(fileRange FileRange) bool {
- if fileRange.End-fileRange.Start > fourMB {
- return true
- }
-
- return false
-}
-
-// ListRangesOptions includes options for a list file ranges operation
-type ListRangesOptions struct {
- Timeout uint
- ListRange *FileRange
-}
-
-// ListRanges returns the list of valid ranges for this file.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
-func (f *File) ListRanges(options *ListRangesOptions) (*FileRanges, error) {
- params := url.Values{"comp": {"rangelist"}}
-
- // add optional range to list
- var headers map[string]string
- if options != nil {
- params = addTimeout(params, options.Timeout)
- if options.ListRange != nil {
- headers = make(map[string]string)
- headers["Range"] = options.ListRange.String()
- }
- }
-
- resp, err := f.fsc.listContent(f.buildPath(), params, headers)
- if err != nil {
- return nil, err
- }
-
- defer resp.Body.Close()
- var cl uint64
- cl, err = strconv.ParseUint(resp.Header.Get("x-ms-content-length"), 10, 64)
- if err != nil {
- ioutil.ReadAll(resp.Body)
- return nil, err
- }
-
- var out FileRanges
- out.ContentLength = cl
- out.ETag = resp.Header.Get("ETag")
- out.LastModified = resp.Header.Get("Last-Modified")
-
- err = xmlUnmarshal(resp.Body, &out)
- return &out, err
-}
-
-// modifies a range of bytes in this file
-func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, timeout *uint, contentMD5 *string) (http.Header, error) {
- if err := f.fsc.checkForStorageEmulator(); err != nil {
- return nil, err
- }
- if fileRange.End < fileRange.Start {
- return nil, errors.New("the value for rangeEnd must be greater than or equal to rangeStart")
- }
- if bytes != nil && isRangeTooBig(fileRange) {
- return nil, errors.New("range cannot exceed 4MB in size")
- }
-
- params := url.Values{"comp": {"range"}}
- if timeout != nil {
- params = addTimeout(params, *timeout)
- }
-
- uri := f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), params)
-
- // default to clear
- write := "clear"
- cl := uint64(0)
-
- // if bytes is not nil then this is an update operation
- if bytes != nil {
- write = "update"
- cl = (fileRange.End - fileRange.Start) + 1
- }
-
- extraHeaders := map[string]string{
- "Content-Length": strconv.FormatUint(cl, 10),
- "Range": fileRange.String(),
- "x-ms-write": write,
- }
-
- if contentMD5 != nil {
- extraHeaders["Content-MD5"] = *contentMD5
- }
-
- headers := mergeHeaders(f.fsc.client.getStandardHeaders(), extraHeaders)
- resp, err := f.fsc.client.exec(http.MethodPut, uri, headers, bytes, f.fsc.auth)
- if err != nil {
- return nil, err
- }
- defer drainRespBody(resp)
- return resp.Header, checkRespCode(resp, []int{http.StatusCreated})
-}
-
-// SetMetadata replaces the metadata for this file.
-//
-// Some keys may be converted to Camel-Case before sending. All keys
-// are returned in lower case by GetFileMetadata. HTTP header names
-// are case-insensitive so case munging should not matter to other
-// applications either.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Metadata
-func (f *File) SetMetadata(options *FileRequestOptions) error {
- headers, err := f.fsc.setResourceHeaders(f.buildPath(), compMetadata, resourceFile, mergeMDIntoExtraHeaders(f.Metadata, nil), options)
- if err != nil {
- return err
- }
-
- f.updateEtagAndLastModified(headers)
- return nil
-}
-
-// SetProperties sets system properties on this file.
-//
-// Some keys may be converted to Camel-Case before sending. All keys
-// are returned in lower case by SetFileProperties. HTTP header names
-// are case-insensitive so case munging should not matter to other
-// applications either.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Properties
-func (f *File) SetProperties(options *FileRequestOptions) error {
- headers, err := f.fsc.setResourceHeaders(f.buildPath(), compProperties, resourceFile, headersFromStruct(f.Properties), options)
- if err != nil {
- return err
- }
-
- f.updateEtagAndLastModified(headers)
- return nil
-}
-
-// updates Etag and last modified date
-func (f *File) updateEtagAndLastModified(headers http.Header) {
- f.Properties.Etag = headers.Get("Etag")
- f.Properties.LastModified = headers.Get("Last-Modified")
-}
-
-// updates file properties from the specified HTTP header
-func (f *File) updateProperties(header http.Header) {
- size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64)
- if err == nil {
- f.Properties.Length = size
- }
-
- f.updateEtagAndLastModified(header)
- f.Properties.CacheControl = header.Get("Cache-Control")
- f.Properties.Disposition = header.Get("Content-Disposition")
- f.Properties.Encoding = header.Get("Content-Encoding")
- f.Properties.Language = header.Get("Content-Language")
- f.Properties.MD5 = header.Get("Content-MD5")
- f.Properties.Type = header.Get("Content-Type")
-}
-
-// URL gets the canonical URL to this file.
-// This method does not create a publicly accessible URL if the file
-// is private and this method does not check if the file exists.
-func (f *File) URL() string {
- return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), nil)
-}
-
-// WriteRangeOptions includes options for a write file range operation
-type WriteRangeOptions struct {
- Timeout uint
- ContentMD5 string
-}
-
-// WriteRange writes a range of bytes to this file with an optional MD5 hash of the content (inside
-// options parameter). Note that the length of bytes must match (rangeEnd - rangeStart) + 1 with
-// a maximum size of 4MB.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range
-func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, options *WriteRangeOptions) error {
- if bytes == nil {
- return errors.New("bytes cannot be nil")
- }
- var timeout *uint
- var md5 *string
- if options != nil {
- timeout = &options.Timeout
- md5 = &options.ContentMD5
- }
-
- headers, err := f.modifyRange(bytes, fileRange, timeout, md5)
- if err != nil {
- return err
- }
- // it's perfectly legal for multiple go routines to call WriteRange
- // on the same *File (e.g. concurrently writing non-overlapping ranges)
- // so we must take the file mutex before updating our properties.
- f.mutex.Lock()
- f.updateEtagAndLastModified(headers)
- f.mutex.Unlock()
- return nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go
deleted file mode 100644
index 6a12d6dcba..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go
+++ /dev/null
@@ -1,327 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-import (
- "encoding/xml"
- "fmt"
- "net/http"
- "net/url"
- "strconv"
-)
-
-// FileServiceClient contains operations for Microsoft Azure File Service.
-type FileServiceClient struct {
- client Client
- auth authentication
-}
-
-// ListSharesParameters defines the set of customizable parameters to make a
-// List Shares call.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares
-type ListSharesParameters struct {
- Prefix string
- Marker string
- Include string
- MaxResults uint
- Timeout uint
-}
-
-// ShareListResponse contains the response fields from
-// ListShares call.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares
-type ShareListResponse struct {
- XMLName xml.Name `xml:"EnumerationResults"`
- Xmlns string `xml:"xmlns,attr"`
- Prefix string `xml:"Prefix"`
- Marker string `xml:"Marker"`
- NextMarker string `xml:"NextMarker"`
- MaxResults int64 `xml:"MaxResults"`
- Shares []Share `xml:"Shares>Share"`
-}
-
-type compType string
-
-const (
- compNone compType = ""
- compList compType = "list"
- compMetadata compType = "metadata"
- compProperties compType = "properties"
- compRangeList compType = "rangelist"
-)
-
-func (ct compType) String() string {
- return string(ct)
-}
-
-type resourceType string
-
-const (
- resourceDirectory resourceType = "directory"
- resourceFile resourceType = ""
- resourceShare resourceType = "share"
-)
-
-func (rt resourceType) String() string {
- return string(rt)
-}
-
-func (p ListSharesParameters) getParameters() url.Values {
- out := url.Values{}
-
- if p.Prefix != "" {
- out.Set("prefix", p.Prefix)
- }
- if p.Marker != "" {
- out.Set("marker", p.Marker)
- }
- if p.Include != "" {
- out.Set("include", p.Include)
- }
- if p.MaxResults != 0 {
- out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
- }
- if p.Timeout != 0 {
- out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10))
- }
-
- return out
-}
-
-func (p ListDirsAndFilesParameters) getParameters() url.Values {
- out := url.Values{}
-
- if p.Prefix != "" {
- out.Set("prefix", p.Prefix)
- }
- if p.Marker != "" {
- out.Set("marker", p.Marker)
- }
- if p.MaxResults != 0 {
- out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
- }
- out = addTimeout(out, p.Timeout)
-
- return out
-}
-
-// returns url.Values for the specified types
-func getURLInitValues(comp compType, res resourceType) url.Values {
- values := url.Values{}
- if comp != compNone {
- values.Set("comp", comp.String())
- }
- if res != resourceFile {
- values.Set("restype", res.String())
- }
- return values
-}
-
-// GetShareReference returns a Share object for the specified share name.
-func (f *FileServiceClient) GetShareReference(name string) *Share {
- return &Share{
- fsc: f,
- Name: name,
- Properties: ShareProperties{
- Quota: -1,
- },
- }
-}
-
-// ListShares returns the list of shares in a storage account along with
-// pagination token and other response details.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/list-shares
-func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListResponse, error) {
- q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
-
- var out ShareListResponse
- resp, err := f.listContent("", q, nil)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
- err = xmlUnmarshal(resp.Body, &out)
-
- // assign our client to the newly created Share objects
- for i := range out.Shares {
- out.Shares[i].fsc = &f
- }
- return &out, err
-}
-
-// GetServiceProperties gets the properties of your storage account's file service.
-// File service does not support logging
-// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-service-properties
-func (f *FileServiceClient) GetServiceProperties() (*ServiceProperties, error) {
- return f.client.getServiceProperties(fileServiceName, f.auth)
-}
-
-// SetServiceProperties sets the properties of your storage account's file service.
-// File service does not support logging
-// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-file-service-properties
-func (f *FileServiceClient) SetServiceProperties(props ServiceProperties) error {
- return f.client.setServiceProperties(props, fileServiceName, f.auth)
-}
-
-// retrieves directory or share content
-func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*http.Response, error) {
- if err := f.checkForStorageEmulator(); err != nil {
- return nil, err
- }
-
- uri := f.client.getEndpoint(fileServiceName, path, params)
- extraHeaders = f.client.protectUserAgent(extraHeaders)
- headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
-
- resp, err := f.client.exec(http.MethodGet, uri, headers, nil, f.auth)
- if err != nil {
- return nil, err
- }
-
- if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
- drainRespBody(resp)
- return nil, err
- }
-
- return resp, nil
-}
-
-// returns true if the specified resource exists
-func (f FileServiceClient) resourceExists(path string, res resourceType) (bool, http.Header, error) {
- if err := f.checkForStorageEmulator(); err != nil {
- return false, nil, err
- }
-
- uri := f.client.getEndpoint(fileServiceName, path, getURLInitValues(compNone, res))
- headers := f.client.getStandardHeaders()
-
- resp, err := f.client.exec(http.MethodHead, uri, headers, nil, f.auth)
- if resp != nil {
- defer drainRespBody(resp)
- if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound {
- return resp.StatusCode == http.StatusOK, resp.Header, nil
- }
- }
- return false, nil, err
-}
-
-// creates a resource depending on the specified resource type
-func (f FileServiceClient) createResource(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string, expectedResponseCodes []int) (http.Header, error) {
- resp, err := f.createResourceNoClose(path, res, urlParams, extraHeaders)
- if err != nil {
- return nil, err
- }
- defer drainRespBody(resp)
- return resp.Header, checkRespCode(resp, expectedResponseCodes)
-}
-
-// creates a resource depending on the specified resource type, doesn't close the response body
-func (f FileServiceClient) createResourceNoClose(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string) (*http.Response, error) {
- if err := f.checkForStorageEmulator(); err != nil {
- return nil, err
- }
-
- values := getURLInitValues(compNone, res)
- combinedParams := mergeParams(values, urlParams)
- uri := f.client.getEndpoint(fileServiceName, path, combinedParams)
- extraHeaders = f.client.protectUserAgent(extraHeaders)
- headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
-
- return f.client.exec(http.MethodPut, uri, headers, nil, f.auth)
-}
-
-// returns HTTP header data for the specified directory or share
-func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, params url.Values, verb string) (http.Header, error) {
- resp, err := f.getResourceNoClose(path, comp, res, params, verb, nil)
- if err != nil {
- return nil, err
- }
- defer drainRespBody(resp)
-
- if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
- return nil, err
- }
-
- return resp.Header, nil
-}
-
-// gets the specified resource, doesn't close the response body
-func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, params url.Values, verb string, extraHeaders map[string]string) (*http.Response, error) {
- if err := f.checkForStorageEmulator(); err != nil {
- return nil, err
- }
-
- params = mergeParams(params, getURLInitValues(comp, res))
- uri := f.client.getEndpoint(fileServiceName, path, params)
- headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
-
- return f.client.exec(verb, uri, headers, nil, f.auth)
-}
-
-// deletes the resource and returns the response
-func (f FileServiceClient) deleteResource(path string, res resourceType, options *FileRequestOptions) error {
- resp, err := f.deleteResourceNoClose(path, res, options)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
- return checkRespCode(resp, []int{http.StatusAccepted})
-}
-
-// deletes the resource and returns the response, doesn't close the response body
-func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType, options *FileRequestOptions) (*http.Response, error) {
- if err := f.checkForStorageEmulator(); err != nil {
- return nil, err
- }
-
- values := mergeParams(getURLInitValues(compNone, res), prepareOptions(options))
- uri := f.client.getEndpoint(fileServiceName, path, values)
- return f.client.exec(http.MethodDelete, uri, f.client.getStandardHeaders(), nil, f.auth)
-}
-
-// merges metadata into extraHeaders and returns extraHeaders
-func mergeMDIntoExtraHeaders(metadata, extraHeaders map[string]string) map[string]string {
- if metadata == nil && extraHeaders == nil {
- return nil
- }
- if extraHeaders == nil {
- extraHeaders = make(map[string]string)
- }
- for k, v := range metadata {
- extraHeaders[userDefinedMetadataHeaderPrefix+k] = v
- }
- return extraHeaders
-}
-
-// sets extra header data for the specified resource
-func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string, options *FileRequestOptions) (http.Header, error) {
- if err := f.checkForStorageEmulator(); err != nil {
- return nil, err
- }
-
- params := mergeParams(getURLInitValues(comp, res), prepareOptions(options))
- uri := f.client.getEndpoint(fileServiceName, path, params)
- extraHeaders = f.client.protectUserAgent(extraHeaders)
- headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
-
- resp, err := f.client.exec(http.MethodPut, uri, headers, nil, f.auth)
- if err != nil {
- return nil, err
- }
- defer drainRespBody(resp)
-
- return resp.Header, checkRespCode(resp, []int{http.StatusOK})
-}
-
-//checkForStorageEmulator determines if the client is setup for use with
-//Azure Storage Emulator, and returns a relevant error
-func (f FileServiceClient) checkForStorageEmulator() error {
- if f.client.accountName == StorageEmulatorAccountName {
- return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator")
- }
- return nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go
deleted file mode 100644
index 6453477ba6..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go
+++ /dev/null
@@ -1,190 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-import (
- "errors"
- "net/http"
- "net/url"
- "strconv"
- "time"
-)
-
-// lease constants.
-const (
- leaseHeaderPrefix = "x-ms-lease-"
- headerLeaseID = "x-ms-lease-id"
- leaseAction = "x-ms-lease-action"
- leaseBreakPeriod = "x-ms-lease-break-period"
- leaseDuration = "x-ms-lease-duration"
- leaseProposedID = "x-ms-proposed-lease-id"
- leaseTime = "x-ms-lease-time"
-
- acquireLease = "acquire"
- renewLease = "renew"
- changeLease = "change"
- releaseLease = "release"
- breakLease = "break"
-)
-
-// leasePut is common PUT code for the various acquire/release/break etc functions.
-func (b *Blob) leaseCommonPut(headers map[string]string, expectedStatus int, options *LeaseOptions) (http.Header, error) {
- params := url.Values{"comp": {"lease"}}
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
-
- resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
- if err != nil {
- return nil, err
- }
- defer drainRespBody(resp)
-
- if err := checkRespCode(resp, []int{expectedStatus}); err != nil {
- return nil, err
- }
-
- return resp.Header, nil
-}
-
-// LeaseOptions includes options for all operations regarding leasing blobs
-type LeaseOptions struct {
- Timeout uint
- Origin string `header:"Origin"`
- IfMatch string `header:"If-Match"`
- IfNoneMatch string `header:"If-None-Match"`
- IfModifiedSince *time.Time `header:"If-Modified-Since"`
- IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// AcquireLease creates a lease for a blob
-// returns leaseID acquired
-// In API Versions starting on 2012-02-12, the minimum leaseTimeInSeconds is 15, the maximum
-// non-infinite leaseTimeInSeconds is 60. To specify an infinite lease, provide the value -1.
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
-func (b *Blob) AcquireLease(leaseTimeInSeconds int, proposedLeaseID string, options *LeaseOptions) (returnedLeaseID string, err error) {
- headers := b.Container.bsc.client.getStandardHeaders()
- headers[leaseAction] = acquireLease
-
- if leaseTimeInSeconds == -1 {
- // Do nothing, but don't trigger the following clauses.
- } else if leaseTimeInSeconds > 60 || b.Container.bsc.client.apiVersion < "2012-02-12" {
- leaseTimeInSeconds = 60
- } else if leaseTimeInSeconds < 15 {
- leaseTimeInSeconds = 15
- }
-
- headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds)
-
- if proposedLeaseID != "" {
- headers[leaseProposedID] = proposedLeaseID
- }
-
- respHeaders, err := b.leaseCommonPut(headers, http.StatusCreated, options)
- if err != nil {
- return "", err
- }
-
- returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID))
-
- if returnedLeaseID != "" {
- return returnedLeaseID, nil
- }
-
- return "", errors.New("LeaseID not returned")
-}
-
-// BreakLease breaks the lease for a blob
-// Returns the timeout remaining in the lease in seconds
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
-func (b *Blob) BreakLease(options *LeaseOptions) (breakTimeout int, err error) {
- headers := b.Container.bsc.client.getStandardHeaders()
- headers[leaseAction] = breakLease
- return b.breakLeaseCommon(headers, options)
-}
-
-// BreakLeaseWithBreakPeriod breaks the lease for a blob
-// breakPeriodInSeconds is used to determine how long until new lease can be created.
-// Returns the timeout remaining in the lease in seconds
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
-func (b *Blob) BreakLeaseWithBreakPeriod(breakPeriodInSeconds int, options *LeaseOptions) (breakTimeout int, err error) {
- headers := b.Container.bsc.client.getStandardHeaders()
- headers[leaseAction] = breakLease
- headers[leaseBreakPeriod] = strconv.Itoa(breakPeriodInSeconds)
- return b.breakLeaseCommon(headers, options)
-}
-
-// breakLeaseCommon is common code for both version of BreakLease (with and without break period)
-func (b *Blob) breakLeaseCommon(headers map[string]string, options *LeaseOptions) (breakTimeout int, err error) {
-
- respHeaders, err := b.leaseCommonPut(headers, http.StatusAccepted, options)
- if err != nil {
- return 0, err
- }
-
- breakTimeoutStr := respHeaders.Get(http.CanonicalHeaderKey(leaseTime))
- if breakTimeoutStr != "" {
- breakTimeout, err = strconv.Atoi(breakTimeoutStr)
- if err != nil {
- return 0, err
- }
- }
-
- return breakTimeout, nil
-}
-
-// ChangeLease changes a lease ID for a blob
-// Returns the new LeaseID acquired
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
-func (b *Blob) ChangeLease(currentLeaseID string, proposedLeaseID string, options *LeaseOptions) (newLeaseID string, err error) {
- headers := b.Container.bsc.client.getStandardHeaders()
- headers[leaseAction] = changeLease
- headers[headerLeaseID] = currentLeaseID
- headers[leaseProposedID] = proposedLeaseID
-
- respHeaders, err := b.leaseCommonPut(headers, http.StatusOK, options)
- if err != nil {
- return "", err
- }
-
- newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID))
- if newLeaseID != "" {
- return newLeaseID, nil
- }
-
- return "", errors.New("LeaseID not returned")
-}
-
-// ReleaseLease releases the lease for a blob
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
-func (b *Blob) ReleaseLease(currentLeaseID string, options *LeaseOptions) error {
- headers := b.Container.bsc.client.getStandardHeaders()
- headers[leaseAction] = releaseLease
- headers[headerLeaseID] = currentLeaseID
-
- _, err := b.leaseCommonPut(headers, http.StatusOK, options)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// RenewLease renews the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
-func (b *Blob) RenewLease(currentLeaseID string, options *LeaseOptions) error {
- headers := b.Container.bsc.client.getStandardHeaders()
- headers[leaseAction] = renewLease
- headers[headerLeaseID] = currentLeaseID
-
- _, err := b.leaseCommonPut(headers, http.StatusOK, options)
- if err != nil {
- return err
- }
-
- return nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go
deleted file mode 100644
index e5447e4a13..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go
+++ /dev/null
@@ -1,160 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-import (
- "encoding/xml"
- "fmt"
- "net/http"
- "net/url"
- "strconv"
- "time"
-)
-
-// Message represents an Azure message.
-type Message struct {
- Queue *Queue
- Text string `xml:"MessageText"`
- ID string `xml:"MessageId"`
- Insertion TimeRFC1123 `xml:"InsertionTime"`
- Expiration TimeRFC1123 `xml:"ExpirationTime"`
- PopReceipt string `xml:"PopReceipt"`
- NextVisible TimeRFC1123 `xml:"TimeNextVisible"`
- DequeueCount int `xml:"DequeueCount"`
-}
-
-func (m *Message) buildPath() string {
- return fmt.Sprintf("%s/%s", m.Queue.buildPathMessages(), m.ID)
-}
-
-// PutMessageOptions is the set of options can be specified for Put Messsage
-// operation. A zero struct does not use any preferences for the request.
-type PutMessageOptions struct {
- Timeout uint
- VisibilityTimeout int
- MessageTTL int
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// Put operation adds a new message to the back of the message queue.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Message
-func (m *Message) Put(options *PutMessageOptions) error {
- query := url.Values{}
- headers := m.Queue.qsc.client.getStandardHeaders()
-
- req := putMessageRequest{MessageText: m.Text}
- body, nn, err := xmlMarshal(req)
- if err != nil {
- return err
- }
- headers["Content-Length"] = strconv.Itoa(nn)
-
- if options != nil {
- if options.VisibilityTimeout != 0 {
- query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
- }
- if options.MessageTTL != 0 {
- query.Set("messagettl", strconv.Itoa(options.MessageTTL))
- }
- query = addTimeout(query, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
-
- uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.Queue.buildPathMessages(), query)
- resp, err := m.Queue.qsc.client.exec(http.MethodPost, uri, headers, body, m.Queue.qsc.auth)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
- err = checkRespCode(resp, []int{http.StatusCreated})
- if err != nil {
- return err
- }
- err = xmlUnmarshal(resp.Body, m)
- if err != nil {
- return err
- }
- return nil
-}
-
-// UpdateMessageOptions is the set of options can be specified for Update Messsage
-// operation. A zero struct does not use any preferences for the request.
-type UpdateMessageOptions struct {
- Timeout uint
- VisibilityTimeout int
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// Update operation updates the specified message.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Update-Message
-func (m *Message) Update(options *UpdateMessageOptions) error {
- query := url.Values{}
- if m.PopReceipt != "" {
- query.Set("popreceipt", m.PopReceipt)
- }
-
- headers := m.Queue.qsc.client.getStandardHeaders()
- req := putMessageRequest{MessageText: m.Text}
- body, nn, err := xmlMarshal(req)
- if err != nil {
- return err
- }
- headers["Content-Length"] = strconv.Itoa(nn)
- // visibilitytimeout is required for Update (zero or greater) so set the default here
- query.Set("visibilitytimeout", "0")
- if options != nil {
- if options.VisibilityTimeout != 0 {
- query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
- }
- query = addTimeout(query, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), query)
-
- resp, err := m.Queue.qsc.client.exec(http.MethodPut, uri, headers, body, m.Queue.qsc.auth)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
-
- m.PopReceipt = resp.Header.Get("x-ms-popreceipt")
- nextTimeStr := resp.Header.Get("x-ms-time-next-visible")
- if nextTimeStr != "" {
- nextTime, err := time.Parse(time.RFC1123, nextTimeStr)
- if err != nil {
- return err
- }
- m.NextVisible = TimeRFC1123(nextTime)
- }
-
- return checkRespCode(resp, []int{http.StatusNoContent})
-}
-
-// Delete operation deletes the specified message.
-//
-// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx
-func (m *Message) Delete(options *QueueServiceOptions) error {
- params := url.Values{"popreceipt": {m.PopReceipt}}
- headers := m.Queue.qsc.client.getStandardHeaders()
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), params)
-
- resp, err := m.Queue.qsc.client.exec(http.MethodDelete, uri, headers, nil, m.Queue.qsc.auth)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
- return checkRespCode(resp, []int{http.StatusNoContent})
-}
-
-type putMessageRequest struct {
- XMLName xml.Name `xml:"QueueMessage"`
- MessageText string `xml:"MessageText"`
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go
deleted file mode 100644
index 3b05722387..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-// MetadataLevel determines if operations should return a paylod,
-// and it level of detail.
-type MetadataLevel string
-
-// This consts are meant to help with Odata supported operations
-const (
- OdataTypeSuffix = "@odata.type"
-
- // Types
-
- OdataBinary = "Edm.Binary"
- OdataDateTime = "Edm.DateTime"
- OdataDouble = "Edm.Double"
- OdataGUID = "Edm.Guid"
- OdataInt64 = "Edm.Int64"
-
- // Query options
-
- OdataFilter = "$filter"
- OdataOrderBy = "$orderby"
- OdataTop = "$top"
- OdataSkip = "$skip"
- OdataCount = "$count"
- OdataExpand = "$expand"
- OdataSelect = "$select"
- OdataSearch = "$search"
-
- EmptyPayload MetadataLevel = ""
- NoMetadata MetadataLevel = "application/json;odata=nometadata"
- MinimalMetadata MetadataLevel = "application/json;odata=minimalmetadata"
- FullMetadata MetadataLevel = "application/json;odata=fullmetadata"
-)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go
deleted file mode 100644
index ff93ec2ac9..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go
+++ /dev/null
@@ -1,192 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-import (
- "encoding/xml"
- "errors"
- "fmt"
- "io"
- "net/http"
- "net/url"
- "time"
-)
-
-// GetPageRangesResponse contains the response fields from
-// Get Page Ranges call.
-//
-// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
-type GetPageRangesResponse struct {
- XMLName xml.Name `xml:"PageList"`
- PageList []PageRange `xml:"PageRange"`
-}
-
-// PageRange contains information about a page of a page blob from
-// Get Pages Range call.
-//
-// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
-type PageRange struct {
- Start int64 `xml:"Start"`
- End int64 `xml:"End"`
-}
-
-var (
- errBlobCopyAborted = errors.New("storage: blob copy is aborted")
- errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch")
-)
-
-// PutPageOptions includes the options for a put page operation
-type PutPageOptions struct {
- Timeout uint
- LeaseID string `header:"x-ms-lease-id"`
- IfSequenceNumberLessThanOrEqualTo *int `header:"x-ms-if-sequence-number-le"`
- IfSequenceNumberLessThan *int `header:"x-ms-if-sequence-number-lt"`
- IfSequenceNumberEqualTo *int `header:"x-ms-if-sequence-number-eq"`
- IfModifiedSince *time.Time `header:"If-Modified-Since"`
- IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
- IfMatch string `header:"If-Match"`
- IfNoneMatch string `header:"If-None-Match"`
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// WriteRange writes a range of pages to a page blob.
-// Ranges must be aligned with 512-byte boundaries and chunk must be of size
-// multiplies by 512.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page
-func (b *Blob) WriteRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error {
- if bytes == nil {
- return errors.New("bytes cannot be nil")
- }
- return b.modifyRange(blobRange, bytes, options)
-}
-
-// ClearRange clears the given range in a page blob.
-// Ranges must be aligned with 512-byte boundaries and chunk must be of size
-// multiplies by 512.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page
-func (b *Blob) ClearRange(blobRange BlobRange, options *PutPageOptions) error {
- return b.modifyRange(blobRange, nil, options)
-}
-
-func (b *Blob) modifyRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error {
- if blobRange.End < blobRange.Start {
- return errors.New("the value for rangeEnd must be greater than or equal to rangeStart")
- }
- if blobRange.Start%512 != 0 {
- return errors.New("the value for rangeStart must be a multiple of 512")
- }
- if blobRange.End%512 != 511 {
- return errors.New("the value for rangeEnd must be a multiple of 512 - 1")
- }
-
- params := url.Values{"comp": {"page"}}
-
- // default to clear
- write := "clear"
- var cl uint64
-
- // if bytes is not nil then this is an update operation
- if bytes != nil {
- write = "update"
- cl = (blobRange.End - blobRange.Start) + 1
- }
-
- headers := b.Container.bsc.client.getStandardHeaders()
- headers["x-ms-blob-type"] = string(BlobTypePage)
- headers["x-ms-page-write"] = write
- headers["x-ms-range"] = blobRange.String()
- headers["Content-Length"] = fmt.Sprintf("%v", cl)
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
-
- resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes, b.Container.bsc.auth)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
- return checkRespCode(resp, []int{http.StatusCreated})
-}
-
-// GetPageRangesOptions includes the options for a get page ranges operation
-type GetPageRangesOptions struct {
- Timeout uint
- Snapshot *time.Time
- PreviousSnapshot *time.Time
- Range *BlobRange
- LeaseID string `header:"x-ms-lease-id"`
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// GetPageRanges returns the list of valid page ranges for a page blob.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Page-Ranges
-func (b *Blob) GetPageRanges(options *GetPageRangesOptions) (GetPageRangesResponse, error) {
- params := url.Values{"comp": {"pagelist"}}
- headers := b.Container.bsc.client.getStandardHeaders()
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- params = addSnapshot(params, options.Snapshot)
- if options.PreviousSnapshot != nil {
- params.Add("prevsnapshot", timeRFC3339Formatted(*options.PreviousSnapshot))
- }
- if options.Range != nil {
- headers["Range"] = options.Range.String()
- }
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
-
- var out GetPageRangesResponse
- resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
- if err != nil {
- return out, err
- }
- defer drainRespBody(resp)
-
- if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
- return out, err
- }
- err = xmlUnmarshal(resp.Body, &out)
- return out, err
-}
-
-// PutPageBlob initializes an empty page blob with specified name and maximum
-// size in bytes (size must be aligned to a 512-byte boundary). A page blob must
-// be created using this method before writing pages.
-//
-// See CreateBlockBlobFromReader for more info on creating blobs.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
-func (b *Blob) PutPageBlob(options *PutBlobOptions) error {
- if b.Properties.ContentLength%512 != 0 {
- return errors.New("Content length must be aligned to a 512-byte boundary")
- }
-
- params := url.Values{}
- headers := b.Container.bsc.client.getStandardHeaders()
- headers["x-ms-blob-type"] = string(BlobTypePage)
- headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", b.Properties.ContentLength)
- headers["x-ms-blob-sequence-number"] = fmt.Sprintf("%v", b.Properties.SequenceNumber)
- headers = mergeHeaders(headers, headersFromStruct(b.Properties))
- headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
-
- resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
- if err != nil {
- return err
- }
- return b.respondCreation(resp, BlobTypePage)
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go
deleted file mode 100644
index 7731e4ebc1..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go
+++ /dev/null
@@ -1,425 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-import (
- "encoding/xml"
- "fmt"
- "io"
- "net/http"
- "net/url"
- "strconv"
- "time"
-)
-
-const (
- // casing is per Golang's http.Header canonicalizing the header names.
- approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count"
-)
-
-// QueueAccessPolicy represents each access policy in the queue ACL.
-type QueueAccessPolicy struct {
- ID string
- StartTime time.Time
- ExpiryTime time.Time
- CanRead bool
- CanAdd bool
- CanUpdate bool
- CanProcess bool
-}
-
-// QueuePermissions represents the queue ACLs.
-type QueuePermissions struct {
- AccessPolicies []QueueAccessPolicy
-}
-
-// SetQueuePermissionOptions includes options for a set queue permissions operation
-type SetQueuePermissionOptions struct {
- Timeout uint
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// Queue represents an Azure queue.
-type Queue struct {
- qsc *QueueServiceClient
- Name string
- Metadata map[string]string
- AproxMessageCount uint64
-}
-
-func (q *Queue) buildPath() string {
- return fmt.Sprintf("/%s", q.Name)
-}
-
-func (q *Queue) buildPathMessages() string {
- return fmt.Sprintf("%s/messages", q.buildPath())
-}
-
-// QueueServiceOptions includes options for some queue service operations
-type QueueServiceOptions struct {
- Timeout uint
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// Create operation creates a queue under the given account.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Queue4
-func (q *Queue) Create(options *QueueServiceOptions) error {
- params := url.Values{}
- headers := q.qsc.client.getStandardHeaders()
- headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata)
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
-
- resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
- return checkRespCode(resp, []int{http.StatusCreated})
-}
-
-// Delete operation permanently deletes the specified queue.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Queue3
-func (q *Queue) Delete(options *QueueServiceOptions) error {
- params := url.Values{}
- headers := q.qsc.client.getStandardHeaders()
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
- resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
- return checkRespCode(resp, []int{http.StatusNoContent})
-}
-
-// Exists returns true if a queue with given name exists.
-func (q *Queue) Exists() (bool, error) {
- uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), url.Values{"comp": {"metadata"}})
- resp, err := q.qsc.client.exec(http.MethodGet, uri, q.qsc.client.getStandardHeaders(), nil, q.qsc.auth)
- if resp != nil {
- defer drainRespBody(resp)
- if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound {
- return resp.StatusCode == http.StatusOK, nil
- }
- err = getErrorFromResponse(resp)
- }
- return false, err
-}
-
-// SetMetadata operation sets user-defined metadata on the specified queue.
-// Metadata is associated with the queue as name-value pairs.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata
-func (q *Queue) SetMetadata(options *QueueServiceOptions) error {
- params := url.Values{"comp": {"metadata"}}
- headers := q.qsc.client.getStandardHeaders()
- headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata)
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
-
- resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
- return checkRespCode(resp, []int{http.StatusNoContent})
-}
-
-// GetMetadata operation retrieves user-defined metadata and queue
-// properties on the specified queue. Metadata is associated with
-// the queue as name-values pairs.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata
-//
-// Because the way Golang's http client (and http.Header in particular)
-// canonicalize header names, the returned metadata names would always
-// be all lower case.
-func (q *Queue) GetMetadata(options *QueueServiceOptions) error {
- params := url.Values{"comp": {"metadata"}}
- headers := q.qsc.client.getStandardHeaders()
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
-
- resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
-
- if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
- return err
- }
-
- aproxMessagesStr := resp.Header.Get(http.CanonicalHeaderKey(approximateMessagesCountHeader))
- if aproxMessagesStr != "" {
- aproxMessages, err := strconv.ParseUint(aproxMessagesStr, 10, 64)
- if err != nil {
- return err
- }
- q.AproxMessageCount = aproxMessages
- }
-
- q.Metadata = getMetadataFromHeaders(resp.Header)
- return nil
-}
-
-// GetMessageReference returns a message object with the specified text.
-func (q *Queue) GetMessageReference(text string) *Message {
- return &Message{
- Queue: q,
- Text: text,
- }
-}
-
-// GetMessagesOptions is the set of options can be specified for Get
-// Messsages operation. A zero struct does not use any preferences for the
-// request.
-type GetMessagesOptions struct {
- Timeout uint
- NumOfMessages int
- VisibilityTimeout int
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-type messages struct {
- XMLName xml.Name `xml:"QueueMessagesList"`
- Messages []Message `xml:"QueueMessage"`
-}
-
-// GetMessages operation retrieves one or more messages from the front of the
-// queue.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Messages
-func (q *Queue) GetMessages(options *GetMessagesOptions) ([]Message, error) {
- query := url.Values{}
- headers := q.qsc.client.getStandardHeaders()
-
- if options != nil {
- if options.NumOfMessages != 0 {
- query.Set("numofmessages", strconv.Itoa(options.NumOfMessages))
- }
- if options.VisibilityTimeout != 0 {
- query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
- }
- query = addTimeout(query, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query)
-
- resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
- if err != nil {
- return []Message{}, err
- }
- defer resp.Body.Close()
-
- var out messages
- err = xmlUnmarshal(resp.Body, &out)
- if err != nil {
- return []Message{}, err
- }
- for i := range out.Messages {
- out.Messages[i].Queue = q
- }
- return out.Messages, err
-}
-
-// PeekMessagesOptions is the set of options can be specified for Peek
-// Messsage operation. A zero struct does not use any preferences for the
-// request.
-type PeekMessagesOptions struct {
- Timeout uint
- NumOfMessages int
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// PeekMessages retrieves one or more messages from the front of the queue, but
-// does not alter the visibility of the message.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Peek-Messages
-func (q *Queue) PeekMessages(options *PeekMessagesOptions) ([]Message, error) {
- query := url.Values{"peekonly": {"true"}} // Required for peek operation
- headers := q.qsc.client.getStandardHeaders()
-
- if options != nil {
- if options.NumOfMessages != 0 {
- query.Set("numofmessages", strconv.Itoa(options.NumOfMessages))
- }
- query = addTimeout(query, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query)
-
- resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
- if err != nil {
- return []Message{}, err
- }
- defer resp.Body.Close()
-
- var out messages
- err = xmlUnmarshal(resp.Body, &out)
- if err != nil {
- return []Message{}, err
- }
- for i := range out.Messages {
- out.Messages[i].Queue = q
- }
- return out.Messages, err
-}
-
-// ClearMessages operation deletes all messages from the specified queue.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Clear-Messages
-func (q *Queue) ClearMessages(options *QueueServiceOptions) error {
- params := url.Values{}
- headers := q.qsc.client.getStandardHeaders()
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), params)
-
- resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
- return checkRespCode(resp, []int{http.StatusNoContent})
-}
-
-// SetPermissions sets up queue permissions
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-acl
-func (q *Queue) SetPermissions(permissions QueuePermissions, options *SetQueuePermissionOptions) error {
- body, length, err := generateQueueACLpayload(permissions.AccessPolicies)
- if err != nil {
- return err
- }
-
- params := url.Values{
- "comp": {"acl"},
- }
- headers := q.qsc.client.getStandardHeaders()
- headers["Content-Length"] = strconv.Itoa(length)
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
- resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, body, q.qsc.auth)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
- return checkRespCode(resp, []int{http.StatusNoContent})
-}
-
-func generateQueueACLpayload(policies []QueueAccessPolicy) (io.Reader, int, error) {
- sil := SignedIdentifiers{
- SignedIdentifiers: []SignedIdentifier{},
- }
- for _, qapd := range policies {
- permission := qapd.generateQueuePermissions()
- signedIdentifier := convertAccessPolicyToXMLStructs(qapd.ID, qapd.StartTime, qapd.ExpiryTime, permission)
- sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
- }
- return xmlMarshal(sil)
-}
-
-func (qapd *QueueAccessPolicy) generateQueuePermissions() (permissions string) {
- // generate the permissions string (raup).
- // still want the end user API to have bool flags.
- permissions = ""
-
- if qapd.CanRead {
- permissions += "r"
- }
-
- if qapd.CanAdd {
- permissions += "a"
- }
-
- if qapd.CanUpdate {
- permissions += "u"
- }
-
- if qapd.CanProcess {
- permissions += "p"
- }
-
- return permissions
-}
-
-// GetQueuePermissionOptions includes options for a get queue permissions operation
-type GetQueuePermissionOptions struct {
- Timeout uint
- RequestID string `header:"x-ms-client-request-id"`
-}
-
-// GetPermissions gets the queue permissions as per https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-acl
-// If timeout is 0 then it will not be passed to Azure
-func (q *Queue) GetPermissions(options *GetQueuePermissionOptions) (*QueuePermissions, error) {
- params := url.Values{
- "comp": {"acl"},
- }
- headers := q.qsc.client.getStandardHeaders()
-
- if options != nil {
- params = addTimeout(params, options.Timeout)
- headers = mergeHeaders(headers, headersFromStruct(*options))
- }
- uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
- resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
-
- var ap AccessPolicy
- err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList)
- if err != nil {
- return nil, err
- }
- return buildQueueAccessPolicy(ap, &resp.Header), nil
-}
-
-func buildQueueAccessPolicy(ap AccessPolicy, headers *http.Header) *QueuePermissions {
- permissions := QueuePermissions{
- AccessPolicies: []QueueAccessPolicy{},
- }
-
- for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
- qapd := QueueAccessPolicy{
- ID: policy.ID,
- StartTime: policy.AccessPolicy.StartTime,
- ExpiryTime: policy.AccessPolicy.ExpiryTime,
- }
- qapd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
- qapd.CanAdd = updatePermissions(policy.AccessPolicy.Permission, "a")
- qapd.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u")
- qapd.CanProcess = updatePermissions(policy.AccessPolicy.Permission, "p")
-
- permissions.AccessPolicies = append(permissions.AccessPolicies, qapd)
- }
- return &permissions
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go
deleted file mode 100644
index ab39f956fb..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go
+++ /dev/null
@@ -1,135 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-import (
- "errors"
- "fmt"
- "net/url"
- "strings"
- "time"
-)
-
-// QueueSASOptions are options to construct a blob SAS
-// URI.
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
-type QueueSASOptions struct {
- QueueSASPermissions
- SASOptions
-}
-
-// QueueSASPermissions includes the available permissions for
-// a queue SAS URI.
-type QueueSASPermissions struct {
- Read bool
- Add bool
- Update bool
- Process bool
-}
-
-func (q QueueSASPermissions) buildString() string {
- permissions := ""
-
- if q.Read {
- permissions += "r"
- }
- if q.Add {
- permissions += "a"
- }
- if q.Update {
- permissions += "u"
- }
- if q.Process {
- permissions += "p"
- }
- return permissions
-}
-
-// GetSASURI creates an URL to the specified queue which contains the Shared
-// Access Signature with specified permissions and expiration time.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
-func (q *Queue) GetSASURI(options QueueSASOptions) (string, error) {
- canonicalizedResource, err := q.qsc.client.buildCanonicalizedResource(q.buildPath(), q.qsc.auth, true)
- if err != nil {
- return "", err
- }
-
- // "The canonicalizedresouce portion of the string is a canonical path to the signed resource.
- // It must include the service name (blob, table, queue or file) for version 2015-02-21 or
- // later, the storage account name, and the resource name, and must be URL-decoded.
- // -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
- // We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component).
- canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1)
- canonicalizedResource, err = url.QueryUnescape(canonicalizedResource)
- if err != nil {
- return "", err
- }
-
- signedStart := ""
- if options.Start != (time.Time{}) {
- signedStart = options.Start.UTC().Format(time.RFC3339)
- }
- signedExpiry := options.Expiry.UTC().Format(time.RFC3339)
-
- protocols := "https,http"
- if options.UseHTTPS {
- protocols = "https"
- }
-
- permissions := options.QueueSASPermissions.buildString()
- stringToSign, err := queueSASStringToSign(q.qsc.client.apiVersion, canonicalizedResource, signedStart, signedExpiry, options.IP, permissions, protocols, options.Identifier)
- if err != nil {
- return "", err
- }
-
- sig := q.qsc.client.computeHmac256(stringToSign)
- sasParams := url.Values{
- "sv": {q.qsc.client.apiVersion},
- "se": {signedExpiry},
- "sp": {permissions},
- "sig": {sig},
- }
-
- if q.qsc.client.apiVersion >= "2015-04-05" {
- sasParams.Add("spr", protocols)
- addQueryParameter(sasParams, "sip", options.IP)
- }
-
- uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), nil)
- sasURL, err := url.Parse(uri)
- if err != nil {
- return "", err
- }
- sasURL.RawQuery = sasParams.Encode()
- return sasURL.String(), nil
-}
-
-func queueSASStringToSign(signedVersion, canonicalizedResource, signedStart, signedExpiry, signedIP, signedPermissions, protocols, signedIdentifier string) (string, error) {
-
- if signedVersion >= "2015-02-21" {
- canonicalizedResource = "/queue" + canonicalizedResource
- }
-
- // https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12
- if signedVersion >= "2015-04-05" {
- return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s",
- signedPermissions,
- signedStart,
- signedExpiry,
- canonicalizedResource,
- signedIdentifier,
- signedIP,
- protocols,
- signedVersion), nil
-
- }
-
- // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
- if signedVersion >= "2013-08-15" {
- return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion), nil
- }
-
- return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go
deleted file mode 100644
index 752701c3bd..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-// QueueServiceClient contains operations for Microsoft Azure Queue Storage
-// Service.
-type QueueServiceClient struct {
- client Client
- auth authentication
-}
-
-// GetServiceProperties gets the properties of your storage account's queue service.
-// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-service-properties
-func (q *QueueServiceClient) GetServiceProperties() (*ServiceProperties, error) {
- return q.client.getServiceProperties(queueServiceName, q.auth)
-}
-
-// SetServiceProperties sets the properties of your storage account's queue service.
-// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-service-properties
-func (q *QueueServiceClient) SetServiceProperties(props ServiceProperties) error {
- return q.client.setServiceProperties(props, queueServiceName, q.auth)
-}
-
-// GetQueueReference returns a Container object for the specified queue name.
-func (q *QueueServiceClient) GetQueueReference(name string) *Queue {
- return &Queue{
- qsc: q,
- Name: name,
- }
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go
deleted file mode 100644
index 30f7c14350..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go
+++ /dev/null
@@ -1,205 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-import (
- "fmt"
- "net/http"
- "net/url"
- "strconv"
-)
-
-// Share represents an Azure file share.
-type Share struct {
- fsc *FileServiceClient
- Name string `xml:"Name"`
- Properties ShareProperties `xml:"Properties"`
- Metadata map[string]string
-}
-
-// ShareProperties contains various properties of a share.
-type ShareProperties struct {
- LastModified string `xml:"Last-Modified"`
- Etag string `xml:"Etag"`
- Quota int `xml:"Quota"`
-}
-
-// builds the complete path for this share object.
-func (s *Share) buildPath() string {
- return fmt.Sprintf("/%s", s.Name)
-}
-
-// Create this share under the associated account.
-// If a share with the same name already exists, the operation fails.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share
-func (s *Share) Create(options *FileRequestOptions) error {
- extraheaders := map[string]string{}
- if s.Properties.Quota > 0 {
- extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota)
- }
-
- params := prepareOptions(options)
- headers, err := s.fsc.createResource(s.buildPath(), resourceShare, params, mergeMDIntoExtraHeaders(s.Metadata, extraheaders), []int{http.StatusCreated})
- if err != nil {
- return err
- }
-
- s.updateEtagAndLastModified(headers)
- return nil
-}
-
-// CreateIfNotExists creates this share under the associated account if
-// it does not exist. Returns true if the share is newly created or false if
-// the share already exists.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share
-func (s *Share) CreateIfNotExists(options *FileRequestOptions) (bool, error) {
- extraheaders := map[string]string{}
- if s.Properties.Quota > 0 {
- extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota)
- }
-
- params := prepareOptions(options)
- resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, params, extraheaders)
- if resp != nil {
- defer drainRespBody(resp)
- if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusConflict {
- if resp.StatusCode == http.StatusCreated {
- s.updateEtagAndLastModified(resp.Header)
- return true, nil
- }
- return false, s.FetchAttributes(nil)
- }
- }
-
- return false, err
-}
-
-// Delete marks this share for deletion. The share along with any files
-// and directories contained within it are later deleted during garbage
-// collection. If the share does not exist the operation fails
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share
-func (s *Share) Delete(options *FileRequestOptions) error {
- return s.fsc.deleteResource(s.buildPath(), resourceShare, options)
-}
-
-// DeleteIfExists operation marks this share for deletion if it exists.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share
-func (s *Share) DeleteIfExists(options *FileRequestOptions) (bool, error) {
- resp, err := s.fsc.deleteResourceNoClose(s.buildPath(), resourceShare, options)
- if resp != nil {
- defer drainRespBody(resp)
- if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound {
- return resp.StatusCode == http.StatusAccepted, nil
- }
- }
- return false, err
-}
-
-// Exists returns true if this share already exists
-// on the storage account, otherwise returns false.
-func (s *Share) Exists() (bool, error) {
- exists, headers, err := s.fsc.resourceExists(s.buildPath(), resourceShare)
- if exists {
- s.updateEtagAndLastModified(headers)
- s.updateQuota(headers)
- }
- return exists, err
-}
-
-// FetchAttributes retrieves metadata and properties for this share.
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-share-properties
-func (s *Share) FetchAttributes(options *FileRequestOptions) error {
- params := prepareOptions(options)
- headers, err := s.fsc.getResourceHeaders(s.buildPath(), compNone, resourceShare, params, http.MethodHead)
- if err != nil {
- return err
- }
-
- s.updateEtagAndLastModified(headers)
- s.updateQuota(headers)
- s.Metadata = getMetadataFromHeaders(headers)
-
- return nil
-}
-
-// GetRootDirectoryReference returns a Directory object at the root of this share.
-func (s *Share) GetRootDirectoryReference() *Directory {
- return &Directory{
- fsc: s.fsc,
- share: s,
- }
-}
-
-// ServiceClient returns the FileServiceClient associated with this share.
-func (s *Share) ServiceClient() *FileServiceClient {
- return s.fsc
-}
-
-// SetMetadata replaces the metadata for this share.
-//
-// Some keys may be converted to Camel-Case before sending. All keys
-// are returned in lower case by GetShareMetadata. HTTP header names
-// are case-insensitive so case munging should not matter to other
-// applications either.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-share-metadata
-func (s *Share) SetMetadata(options *FileRequestOptions) error {
- headers, err := s.fsc.setResourceHeaders(s.buildPath(), compMetadata, resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil), options)
- if err != nil {
- return err
- }
-
- s.updateEtagAndLastModified(headers)
- return nil
-}
-
-// SetProperties sets system properties for this share.
-//
-// Some keys may be converted to Camel-Case before sending. All keys
-// are returned in lower case by SetShareProperties. HTTP header names
-// are case-insensitive so case munging should not matter to other
-// applications either.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Share-Properties
-func (s *Share) SetProperties(options *FileRequestOptions) error {
- extraheaders := map[string]string{}
- if s.Properties.Quota > 0 {
- if s.Properties.Quota > 5120 {
- return fmt.Errorf("invalid value %v for quota, valid values are [1, 5120]", s.Properties.Quota)
- }
- extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota)
- }
-
- headers, err := s.fsc.setResourceHeaders(s.buildPath(), compProperties, resourceShare, extraheaders, options)
- if err != nil {
- return err
- }
-
- s.updateEtagAndLastModified(headers)
- return nil
-}
-
-// updates Etag and last modified date
-func (s *Share) updateEtagAndLastModified(headers http.Header) {
- s.Properties.Etag = headers.Get("Etag")
- s.Properties.LastModified = headers.Get("Last-Modified")
-}
-
-// updates quota value
-func (s *Share) updateQuota(headers http.Header) {
- quota, err := strconv.Atoi(headers.Get("x-ms-share-quota"))
- if err == nil {
- s.Properties.Quota = quota
- }
-}
-
-// URL gets the canonical URL to this share. This method does not create a publicly accessible
-// URL if the share is private and this method does not check if the share exists.
-func (s *Share) URL() string {
- return s.fsc.client.getEndpoint(fileServiceName, s.buildPath(), url.Values{})
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go
deleted file mode 100644
index 35d13670cb..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-import (
- "strings"
- "time"
-)
-
-// AccessPolicyDetailsXML has specifics about an access policy
-// annotated with XML details.
-type AccessPolicyDetailsXML struct {
- StartTime time.Time `xml:"Start"`
- ExpiryTime time.Time `xml:"Expiry"`
- Permission string `xml:"Permission"`
-}
-
-// SignedIdentifier is a wrapper for a specific policy
-type SignedIdentifier struct {
- ID string `xml:"Id"`
- AccessPolicy AccessPolicyDetailsXML `xml:"AccessPolicy"`
-}
-
-// SignedIdentifiers part of the response from GetPermissions call.
-type SignedIdentifiers struct {
- SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"`
-}
-
-// AccessPolicy is the response type from the GetPermissions call.
-type AccessPolicy struct {
- SignedIdentifiersList SignedIdentifiers `xml:"SignedIdentifiers"`
-}
-
-// convertAccessPolicyToXMLStructs converts between AccessPolicyDetails which is a struct better for API usage to the
-// AccessPolicy struct which will get converted to XML.
-func convertAccessPolicyToXMLStructs(id string, startTime time.Time, expiryTime time.Time, permissions string) SignedIdentifier {
- return SignedIdentifier{
- ID: id,
- AccessPolicy: AccessPolicyDetailsXML{
- StartTime: startTime.UTC().Round(time.Second),
- ExpiryTime: expiryTime.UTC().Round(time.Second),
- Permission: permissions,
- },
- }
-}
-
-func updatePermissions(permissions, permission string) bool {
- return strings.Contains(permissions, permission)
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go
deleted file mode 100644
index d139db7765..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-import (
- "net/http"
- "net/url"
- "strconv"
-)
-
-// ServiceProperties represents the storage account service properties
-type ServiceProperties struct {
- Logging *Logging
- HourMetrics *Metrics
- MinuteMetrics *Metrics
- Cors *Cors
- DeleteRetentionPolicy *RetentionPolicy // blob storage only
- StaticWebsite *StaticWebsite // blob storage only
-}
-
-// Logging represents the Azure Analytics Logging settings
-type Logging struct {
- Version string
- Delete bool
- Read bool
- Write bool
- RetentionPolicy *RetentionPolicy
-}
-
-// RetentionPolicy indicates if retention is enabled and for how many days
-type RetentionPolicy struct {
- Enabled bool
- Days *int
-}
-
-// Metrics provide request statistics.
-type Metrics struct {
- Version string
- Enabled bool
- IncludeAPIs *bool
- RetentionPolicy *RetentionPolicy
-}
-
-// Cors includes all the CORS rules
-type Cors struct {
- CorsRule []CorsRule
-}
-
-// CorsRule includes all settings for a Cors rule
-type CorsRule struct {
- AllowedOrigins string
- AllowedMethods string
- MaxAgeInSeconds int
- ExposedHeaders string
- AllowedHeaders string
-}
-
-// StaticWebsite - The properties that enable an account to host a static website
-type StaticWebsite struct {
- // Enabled - Indicates whether this account is hosting a static website
- Enabled bool
- // IndexDocument - The default name of the index page under each directory
- IndexDocument *string
- // ErrorDocument404Path - The absolute path of the custom 404 page
- ErrorDocument404Path *string
-}
-
-func (c Client) getServiceProperties(service string, auth authentication) (*ServiceProperties, error) {
- query := url.Values{
- "restype": {"service"},
- "comp": {"properties"},
- }
- uri := c.getEndpoint(service, "", query)
- headers := c.getStandardHeaders()
-
- resp, err := c.exec(http.MethodGet, uri, headers, nil, auth)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
-
- if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
- return nil, err
- }
-
- var out ServiceProperties
- err = xmlUnmarshal(resp.Body, &out)
- if err != nil {
- return nil, err
- }
-
- return &out, nil
-}
-
-func (c Client) setServiceProperties(props ServiceProperties, service string, auth authentication) error {
- query := url.Values{
- "restype": {"service"},
- "comp": {"properties"},
- }
- uri := c.getEndpoint(service, "", query)
-
- // Ideally, StorageServiceProperties would be the output struct
- // This is to avoid golint stuttering, while generating the correct XML
- type StorageServiceProperties struct {
- Logging *Logging
- HourMetrics *Metrics
- MinuteMetrics *Metrics
- Cors *Cors
- DeleteRetentionPolicy *RetentionPolicy
- StaticWebsite *StaticWebsite
- }
- input := StorageServiceProperties{
- Logging: props.Logging,
- HourMetrics: props.HourMetrics,
- MinuteMetrics: props.MinuteMetrics,
- Cors: props.Cors,
- }
- // only set these fields for blob storage else it's invalid XML
- if service == blobServiceName {
- input.DeleteRetentionPolicy = props.DeleteRetentionPolicy
- input.StaticWebsite = props.StaticWebsite
- }
-
- body, length, err := xmlMarshal(input)
- if err != nil {
- return err
- }
-
- headers := c.getStandardHeaders()
- headers["Content-Length"] = strconv.Itoa(length)
-
- resp, err := c.exec(http.MethodPut, uri, headers, body, auth)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
- return checkRespCode(resp, []int{http.StatusAccepted})
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go
deleted file mode 100644
index fc8631ee20..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go
+++ /dev/null
@@ -1,412 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "strconv"
- "strings"
- "time"
-)
-
-const (
- tablesURIPath = "/Tables"
- nextTableQueryParameter = "NextTableName"
- headerNextPartitionKey = "x-ms-continuation-NextPartitionKey"
- headerNextRowKey = "x-ms-continuation-NextRowKey"
- nextPartitionKeyQueryParameter = "NextPartitionKey"
- nextRowKeyQueryParameter = "NextRowKey"
-)
-
-// TableAccessPolicy are used for SETTING table policies
-type TableAccessPolicy struct {
- ID string
- StartTime time.Time
- ExpiryTime time.Time
- CanRead bool
- CanAppend bool
- CanUpdate bool
- CanDelete bool
-}
-
-// Table represents an Azure table.
-type Table struct {
- tsc *TableServiceClient
- Name string `json:"TableName"`
- OdataEditLink string `json:"odata.editLink"`
- OdataID string `json:"odata.id"`
- OdataMetadata string `json:"odata.metadata"`
- OdataType string `json:"odata.type"`
-}
-
-// EntityQueryResult contains the response from
-// ExecuteQuery and ExecuteQueryNextResults functions.
-type EntityQueryResult struct {
- OdataMetadata string `json:"odata.metadata"`
- Entities []*Entity `json:"value"`
- QueryNextLink
- table *Table
-}
-
-type continuationToken struct {
- NextPartitionKey string
- NextRowKey string
-}
-
-func (t *Table) buildPath() string {
- return fmt.Sprintf("/%s", t.Name)
-}
-
-func (t *Table) buildSpecificPath() string {
- return fmt.Sprintf("%s('%s')", tablesURIPath, t.Name)
-}
-
-// Get gets the referenced table.
-// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities
-func (t *Table) Get(timeout uint, ml MetadataLevel) error {
- if ml == EmptyPayload {
- return errEmptyPayload
- }
-
- query := url.Values{
- "timeout": {strconv.FormatUint(uint64(timeout), 10)},
- }
- headers := t.tsc.client.getStandardHeaders()
- headers[headerAccept] = string(ml)
-
- uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), query)
- resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth)
- if err != nil {
- return err
- }
- defer resp.Body.Close()
-
- if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
- return err
- }
-
- respBody, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return err
- }
- err = json.Unmarshal(respBody, t)
- if err != nil {
- return err
- }
- return nil
-}
-
-// Create creates the referenced table.
-// This function fails if the name is not compliant
-// with the specification or the tables already exists.
-// ml determines the level of detail of metadata in the operation response,
-// or no data at all.
-// See https://docs.microsoft.com/rest/api/storageservices/fileservices/create-table
-func (t *Table) Create(timeout uint, ml MetadataLevel, options *TableOptions) error {
- uri := t.tsc.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{
- "timeout": {strconv.FormatUint(uint64(timeout), 10)},
- })
-
- type createTableRequest struct {
- TableName string `json:"TableName"`
- }
- req := createTableRequest{TableName: t.Name}
- buf := new(bytes.Buffer)
- if err := json.NewEncoder(buf).Encode(req); err != nil {
- return err
- }
-
- headers := t.tsc.client.getStandardHeaders()
- headers = addReturnContentHeaders(headers, ml)
- headers = addBodyRelatedHeaders(headers, buf.Len())
- headers = options.addToHeaders(headers)
-
- resp, err := t.tsc.client.exec(http.MethodPost, uri, headers, buf, t.tsc.auth)
- if err != nil {
- return err
- }
- defer resp.Body.Close()
-
- if ml == EmptyPayload {
- if err := checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
- return err
- }
- } else {
- if err := checkRespCode(resp, []int{http.StatusCreated}); err != nil {
- return err
- }
- }
-
- if ml != EmptyPayload {
- data, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return err
- }
- err = json.Unmarshal(data, t)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Delete deletes the referenced table.
-// This function fails if the table is not present.
-// Be advised: Delete deletes all the entries that may be present.
-// See https://docs.microsoft.com/rest/api/storageservices/fileservices/delete-table
-func (t *Table) Delete(timeout uint, options *TableOptions) error {
- uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), url.Values{
- "timeout": {strconv.Itoa(int(timeout))},
- })
-
- headers := t.tsc.client.getStandardHeaders()
- headers = addReturnContentHeaders(headers, EmptyPayload)
- headers = options.addToHeaders(headers)
-
- resp, err := t.tsc.client.exec(http.MethodDelete, uri, headers, nil, t.tsc.auth)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
-
- return checkRespCode(resp, []int{http.StatusNoContent})
-}
-
-// QueryOptions includes options for a query entities operation.
-// Top, filter and select are OData query options.
-type QueryOptions struct {
- Top uint
- Filter string
- Select []string
- RequestID string
-}
-
-func (options *QueryOptions) getParameters() (url.Values, map[string]string) {
- query := url.Values{}
- headers := map[string]string{}
- if options != nil {
- if options.Top > 0 {
- query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10))
- }
- if options.Filter != "" {
- query.Add(OdataFilter, options.Filter)
- }
- if len(options.Select) > 0 {
- query.Add(OdataSelect, strings.Join(options.Select, ","))
- }
- headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
- }
- return query, headers
-}
-
-// QueryEntities returns the entities in the table.
-// You can use query options defined by the OData Protocol specification.
-//
-// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities
-func (t *Table) QueryEntities(timeout uint, ml MetadataLevel, options *QueryOptions) (*EntityQueryResult, error) {
- if ml == EmptyPayload {
- return nil, errEmptyPayload
- }
- query, headers := options.getParameters()
- query = addTimeout(query, timeout)
- uri := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), query)
- return t.queryEntities(uri, headers, ml)
-}
-
-// NextResults returns the next page of results
-// from a QueryEntities or NextResults operation.
-//
-// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities
-// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination
-func (eqr *EntityQueryResult) NextResults(options *TableOptions) (*EntityQueryResult, error) {
- if eqr == nil {
- return nil, errNilPreviousResult
- }
- if eqr.NextLink == nil {
- return nil, errNilNextLink
- }
- headers := options.addToHeaders(map[string]string{})
- return eqr.table.queryEntities(*eqr.NextLink, headers, eqr.ml)
-}
-
-// SetPermissions sets up table ACL permissions
-// See https://docs.microsoft.com/rest/api/storageservices/fileservices/Set-Table-ACL
-func (t *Table) SetPermissions(tap []TableAccessPolicy, timeout uint, options *TableOptions) error {
- params := url.Values{"comp": {"acl"},
- "timeout": {strconv.Itoa(int(timeout))},
- }
-
- uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params)
- headers := t.tsc.client.getStandardHeaders()
- headers = options.addToHeaders(headers)
-
- body, length, err := generateTableACLPayload(tap)
- if err != nil {
- return err
- }
- headers["Content-Length"] = strconv.Itoa(length)
-
- resp, err := t.tsc.client.exec(http.MethodPut, uri, headers, body, t.tsc.auth)
- if err != nil {
- return err
- }
- defer drainRespBody(resp)
-
- return checkRespCode(resp, []int{http.StatusNoContent})
-}
-
-func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, error) {
- sil := SignedIdentifiers{
- SignedIdentifiers: []SignedIdentifier{},
- }
- for _, tap := range policies {
- permission := generateTablePermissions(&tap)
- signedIdentifier := convertAccessPolicyToXMLStructs(tap.ID, tap.StartTime, tap.ExpiryTime, permission)
- sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
- }
- return xmlMarshal(sil)
-}
-
-// GetPermissions gets the table ACL permissions
-// See https://docs.microsoft.com/rest/api/storageservices/fileservices/get-table-acl
-func (t *Table) GetPermissions(timeout int, options *TableOptions) ([]TableAccessPolicy, error) {
- params := url.Values{"comp": {"acl"},
- "timeout": {strconv.Itoa(int(timeout))},
- }
-
- uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params)
- headers := t.tsc.client.getStandardHeaders()
- headers = options.addToHeaders(headers)
-
- resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
-
- if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
- return nil, err
- }
-
- var ap AccessPolicy
- err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList)
- if err != nil {
- return nil, err
- }
- return updateTableAccessPolicy(ap), nil
-}
-
-func (t *Table) queryEntities(uri string, headers map[string]string, ml MetadataLevel) (*EntityQueryResult, error) {
- headers = mergeHeaders(headers, t.tsc.client.getStandardHeaders())
- if ml != EmptyPayload {
- headers[headerAccept] = string(ml)
- }
-
- resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
-
- if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
- return nil, err
- }
-
- data, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return nil, err
- }
- var entities EntityQueryResult
- err = json.Unmarshal(data, &entities)
- if err != nil {
- return nil, err
- }
-
- for i := range entities.Entities {
- entities.Entities[i].Table = t
- }
- entities.table = t
-
- contToken := extractContinuationTokenFromHeaders(resp.Header)
- if contToken == nil {
- entities.NextLink = nil
- } else {
- originalURI, err := url.Parse(uri)
- if err != nil {
- return nil, err
- }
- v := originalURI.Query()
- if contToken.NextPartitionKey != "" {
- v.Set(nextPartitionKeyQueryParameter, contToken.NextPartitionKey)
- }
- if contToken.NextRowKey != "" {
- v.Set(nextRowKeyQueryParameter, contToken.NextRowKey)
- }
- newURI := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), v)
- entities.NextLink = &newURI
- entities.ml = ml
- }
-
- return &entities, nil
-}
-
-func extractContinuationTokenFromHeaders(h http.Header) *continuationToken {
- ct := continuationToken{
- NextPartitionKey: h.Get(headerNextPartitionKey),
- NextRowKey: h.Get(headerNextRowKey),
- }
-
- if ct.NextPartitionKey != "" || ct.NextRowKey != "" {
- return &ct
- }
- return nil
-}
-
-func updateTableAccessPolicy(ap AccessPolicy) []TableAccessPolicy {
- taps := []TableAccessPolicy{}
- for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
- tap := TableAccessPolicy{
- ID: policy.ID,
- StartTime: policy.AccessPolicy.StartTime,
- ExpiryTime: policy.AccessPolicy.ExpiryTime,
- }
- tap.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
- tap.CanAppend = updatePermissions(policy.AccessPolicy.Permission, "a")
- tap.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u")
- tap.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d")
-
- taps = append(taps, tap)
- }
- return taps
-}
-
-func generateTablePermissions(tap *TableAccessPolicy) (permissions string) {
- // generate the permissions string (raud).
- // still want the end user API to have bool flags.
- permissions = ""
-
- if tap.CanRead {
- permissions += "r"
- }
-
- if tap.CanAppend {
- permissions += "a"
- }
-
- if tap.CanUpdate {
- permissions += "u"
- }
-
- if tap.CanDelete {
- permissions += "d"
- }
- return permissions
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go
deleted file mode 100644
index b5aaefe473..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go
+++ /dev/null
@@ -1,314 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "mime/multipart"
- "net/http"
- "net/textproto"
- "sort"
- "strings"
-)
-
-// Operation type. Insert, Delete, Replace etc.
-type Operation int
-
-// consts for batch operations.
-const (
- InsertOp = Operation(1)
- DeleteOp = Operation(2)
- ReplaceOp = Operation(3)
- MergeOp = Operation(4)
- InsertOrReplaceOp = Operation(5)
- InsertOrMergeOp = Operation(6)
-)
-
-// BatchEntity used for tracking Entities to operate on and
-// whether operations (replace/merge etc) should be forced.
-// Wrapper for regular Entity with additional data specific for the entity.
-type BatchEntity struct {
- *Entity
- Force bool
- Op Operation
-}
-
-// TableBatch stores all the entities that will be operated on during a batch process.
-// Entities can be inserted, replaced or deleted.
-type TableBatch struct {
- BatchEntitySlice []BatchEntity
-
- // reference to table we're operating on.
- Table *Table
-}
-
-// defaultChangesetHeaders for changeSets
-var defaultChangesetHeaders = map[string]string{
- "Accept": "application/json;odata=minimalmetadata",
- "Content-Type": "application/json",
- "Prefer": "return-no-content",
-}
-
-// NewBatch return new TableBatch for populating.
-func (t *Table) NewBatch() *TableBatch {
- return &TableBatch{
- Table: t,
- }
-}
-
-// InsertEntity adds an entity in preparation for a batch insert.
-func (t *TableBatch) InsertEntity(entity *Entity) {
- be := BatchEntity{Entity: entity, Force: false, Op: InsertOp}
- t.BatchEntitySlice = append(t.BatchEntitySlice, be)
-}
-
-// InsertOrReplaceEntity adds an entity in preparation for a batch insert or replace.
-func (t *TableBatch) InsertOrReplaceEntity(entity *Entity, force bool) {
- be := BatchEntity{Entity: entity, Force: false, Op: InsertOrReplaceOp}
- t.BatchEntitySlice = append(t.BatchEntitySlice, be)
-}
-
-// InsertOrReplaceEntityByForce adds an entity in preparation for a batch insert or replace. Forces regardless of ETag
-func (t *TableBatch) InsertOrReplaceEntityByForce(entity *Entity) {
- t.InsertOrReplaceEntity(entity, true)
-}
-
-// InsertOrMergeEntity adds an entity in preparation for a batch insert or merge.
-func (t *TableBatch) InsertOrMergeEntity(entity *Entity, force bool) {
- be := BatchEntity{Entity: entity, Force: false, Op: InsertOrMergeOp}
- t.BatchEntitySlice = append(t.BatchEntitySlice, be)
-}
-
-// InsertOrMergeEntityByForce adds an entity in preparation for a batch insert or merge. Forces regardless of ETag
-func (t *TableBatch) InsertOrMergeEntityByForce(entity *Entity) {
- t.InsertOrMergeEntity(entity, true)
-}
-
-// ReplaceEntity adds an entity in preparation for a batch replace.
-func (t *TableBatch) ReplaceEntity(entity *Entity) {
- be := BatchEntity{Entity: entity, Force: false, Op: ReplaceOp}
- t.BatchEntitySlice = append(t.BatchEntitySlice, be)
-}
-
-// DeleteEntity adds an entity in preparation for a batch delete
-func (t *TableBatch) DeleteEntity(entity *Entity, force bool) {
- be := BatchEntity{Entity: entity, Force: false, Op: DeleteOp}
- t.BatchEntitySlice = append(t.BatchEntitySlice, be)
-}
-
-// DeleteEntityByForce adds an entity in preparation for a batch delete. Forces regardless of ETag
-func (t *TableBatch) DeleteEntityByForce(entity *Entity, force bool) {
- t.DeleteEntity(entity, true)
-}
-
-// MergeEntity adds an entity in preparation for a batch merge
-func (t *TableBatch) MergeEntity(entity *Entity) {
- be := BatchEntity{Entity: entity, Force: false, Op: MergeOp}
- t.BatchEntitySlice = append(t.BatchEntitySlice, be)
-}
-
-// ExecuteBatch executes many table operations in one request to Azure.
-// The operations can be combinations of Insert, Delete, Replace and Merge
-// Creates the inner changeset body (various operations, Insert, Delete etc) then creates the outer request packet that encompasses
-// the changesets.
-// As per document https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/performing-entity-group-transactions
-func (t *TableBatch) ExecuteBatch() error {
-
- id, err := newUUID()
- if err != nil {
- return err
- }
-
- changesetBoundary := fmt.Sprintf("changeset_%s", id.String())
- uri := t.Table.tsc.client.getEndpoint(tableServiceName, "$batch", nil)
- changesetBody, err := t.generateChangesetBody(changesetBoundary)
- if err != nil {
- return err
- }
-
- id, err = newUUID()
- if err != nil {
- return err
- }
-
- boundary := fmt.Sprintf("batch_%s", id.String())
- body, err := generateBody(changesetBody, changesetBoundary, boundary)
- if err != nil {
- return err
- }
-
- headers := t.Table.tsc.client.getStandardHeaders()
- headers[headerContentType] = fmt.Sprintf("multipart/mixed; boundary=%s", boundary)
-
- resp, err := t.Table.tsc.client.execBatchOperationJSON(http.MethodPost, uri, headers, bytes.NewReader(body.Bytes()), t.Table.tsc.auth)
- if err != nil {
- return err
- }
- defer drainRespBody(resp.resp)
-
- if err = checkRespCode(resp.resp, []int{http.StatusAccepted}); err != nil {
-
- // check which batch failed.
- operationFailedMessage := t.getFailedOperation(resp.odata.Err.Message.Value)
- requestID, date, version := getDebugHeaders(resp.resp.Header)
- return AzureStorageServiceError{
- StatusCode: resp.resp.StatusCode,
- Code: resp.odata.Err.Code,
- RequestID: requestID,
- Date: date,
- APIVersion: version,
- Message: operationFailedMessage,
- }
- }
-
- return nil
-}
-
-// getFailedOperation parses the original Azure error string and determines which operation failed
-// and generates appropriate message.
-func (t *TableBatch) getFailedOperation(errorMessage string) string {
- // errorMessage consists of "number:string" we just need the number.
- sp := strings.Split(errorMessage, ":")
- if len(sp) > 1 {
- msg := fmt.Sprintf("Element %s in the batch returned an unexpected response code.\n%s", sp[0], errorMessage)
- return msg
- }
-
- // cant parse the message, just return the original message to client
- return errorMessage
-}
-
-// generateBody generates the complete body for the batch request.
-func generateBody(changeSetBody *bytes.Buffer, changesetBoundary string, boundary string) (*bytes.Buffer, error) {
-
- body := new(bytes.Buffer)
- writer := multipart.NewWriter(body)
- writer.SetBoundary(boundary)
- h := make(textproto.MIMEHeader)
- h.Set(headerContentType, fmt.Sprintf("multipart/mixed; boundary=%s\r\n", changesetBoundary))
- batchWriter, err := writer.CreatePart(h)
- if err != nil {
- return nil, err
- }
- batchWriter.Write(changeSetBody.Bytes())
- writer.Close()
- return body, nil
-}
-
-// generateChangesetBody generates the individual changesets for the various operations within the batch request.
-// There is a changeset for Insert, Delete, Merge etc.
-func (t *TableBatch) generateChangesetBody(changesetBoundary string) (*bytes.Buffer, error) {
-
- body := new(bytes.Buffer)
- writer := multipart.NewWriter(body)
- writer.SetBoundary(changesetBoundary)
-
- for _, be := range t.BatchEntitySlice {
- t.generateEntitySubset(&be, writer)
- }
-
- writer.Close()
- return body, nil
-}
-
-// generateVerb generates the HTTP request VERB required for each changeset.
-func generateVerb(op Operation) (string, error) {
- switch op {
- case InsertOp:
- return http.MethodPost, nil
- case DeleteOp:
- return http.MethodDelete, nil
- case ReplaceOp, InsertOrReplaceOp:
- return http.MethodPut, nil
- case MergeOp, InsertOrMergeOp:
- return "MERGE", nil
- default:
- return "", errors.New("Unable to detect operation")
- }
-}
-
-// generateQueryPath generates the query path for within the changesets
-// For inserts it will just be a table query path (table name)
-// but for other operations (modifying an existing entity) then
-// the partition/row keys need to be generated.
-func (t *TableBatch) generateQueryPath(op Operation, entity *Entity) string {
- if op == InsertOp {
- return entity.Table.buildPath()
- }
- return entity.buildPath()
-}
-
-// generateGenericOperationHeaders generates common headers for a given operation.
-func generateGenericOperationHeaders(be *BatchEntity) map[string]string {
- retval := map[string]string{}
-
- for k, v := range defaultChangesetHeaders {
- retval[k] = v
- }
-
- if be.Op == DeleteOp || be.Op == ReplaceOp || be.Op == MergeOp {
- if be.Force || be.Entity.OdataEtag == "" {
- retval["If-Match"] = "*"
- } else {
- retval["If-Match"] = be.Entity.OdataEtag
- }
- }
-
- return retval
-}
-
-// generateEntitySubset generates body payload for particular batch entity
-func (t *TableBatch) generateEntitySubset(batchEntity *BatchEntity, writer *multipart.Writer) error {
-
- h := make(textproto.MIMEHeader)
- h.Set(headerContentType, "application/http")
- h.Set(headerContentTransferEncoding, "binary")
-
- verb, err := generateVerb(batchEntity.Op)
- if err != nil {
- return err
- }
-
- genericOpHeadersMap := generateGenericOperationHeaders(batchEntity)
- queryPath := t.generateQueryPath(batchEntity.Op, batchEntity.Entity)
- uri := t.Table.tsc.client.getEndpoint(tableServiceName, queryPath, nil)
-
- operationWriter, err := writer.CreatePart(h)
- if err != nil {
- return err
- }
-
- urlAndVerb := fmt.Sprintf("%s %s HTTP/1.1\r\n", verb, uri)
- operationWriter.Write([]byte(urlAndVerb))
- writeHeaders(genericOpHeadersMap, &operationWriter)
- operationWriter.Write([]byte("\r\n")) // additional \r\n is needed per changeset separating the "headers" and the body.
-
- // delete operation doesn't need a body.
- if batchEntity.Op != DeleteOp {
- //var e Entity = batchEntity.Entity
- body, err := json.Marshal(batchEntity.Entity)
- if err != nil {
- return err
- }
- operationWriter.Write(body)
- }
-
- return nil
-}
-
-func writeHeaders(h map[string]string, writer *io.Writer) {
- // This way it is guaranteed the headers will be written in a sorted order
- var keys []string
- for k := range h {
- keys = append(keys, k)
- }
- sort.Strings(keys)
- for _, k := range keys {
- (*writer).Write([]byte(fmt.Sprintf("%s: %s\r\n", k, h[k])))
- }
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go
deleted file mode 100644
index 8eccd5927b..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go
+++ /dev/null
@@ -1,193 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-import (
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/url"
- "strconv"
-)
-
-const (
- headerAccept = "Accept"
- headerEtag = "Etag"
- headerPrefer = "Prefer"
- headerXmsContinuation = "x-ms-Continuation-NextTableName"
-)
-
-// TableServiceClient contains operations for Microsoft Azure Table Storage
-// Service.
-type TableServiceClient struct {
- client Client
- auth authentication
-}
-
-// TableOptions includes options for some table operations
-type TableOptions struct {
- RequestID string
-}
-
-func (options *TableOptions) addToHeaders(h map[string]string) map[string]string {
- if options != nil {
- h = addToHeaders(h, "x-ms-client-request-id", options.RequestID)
- }
- return h
-}
-
-// QueryNextLink includes information for getting the next page of
-// results in query operations
-type QueryNextLink struct {
- NextLink *string
- ml MetadataLevel
-}
-
-// GetServiceProperties gets the properties of your storage account's table service.
-// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-service-properties
-func (t *TableServiceClient) GetServiceProperties() (*ServiceProperties, error) {
- return t.client.getServiceProperties(tableServiceName, t.auth)
-}
-
-// SetServiceProperties sets the properties of your storage account's table service.
-// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-table-service-properties
-func (t *TableServiceClient) SetServiceProperties(props ServiceProperties) error {
- return t.client.setServiceProperties(props, tableServiceName, t.auth)
-}
-
-// GetTableReference returns a Table object for the specified table name.
-func (t *TableServiceClient) GetTableReference(name string) *Table {
- return &Table{
- tsc: t,
- Name: name,
- }
-}
-
-// QueryTablesOptions includes options for some table operations
-type QueryTablesOptions struct {
- Top uint
- Filter string
- RequestID string
-}
-
-func (options *QueryTablesOptions) getParameters() (url.Values, map[string]string) {
- query := url.Values{}
- headers := map[string]string{}
- if options != nil {
- if options.Top > 0 {
- query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10))
- }
- if options.Filter != "" {
- query.Add(OdataFilter, options.Filter)
- }
- headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
- }
- return query, headers
-}
-
-// QueryTables returns the tables in the storage account.
-// You can use query options defined by the OData Protocol specification.
-//
-// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-tables
-func (t *TableServiceClient) QueryTables(ml MetadataLevel, options *QueryTablesOptions) (*TableQueryResult, error) {
- query, headers := options.getParameters()
- uri := t.client.getEndpoint(tableServiceName, tablesURIPath, query)
- return t.queryTables(uri, headers, ml)
-}
-
-// NextResults returns the next page of results
-// from a QueryTables or a NextResults operation.
-//
-// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-tables
-// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination
-func (tqr *TableQueryResult) NextResults(options *TableOptions) (*TableQueryResult, error) {
- if tqr == nil {
- return nil, errNilPreviousResult
- }
- if tqr.NextLink == nil {
- return nil, errNilNextLink
- }
- headers := options.addToHeaders(map[string]string{})
-
- return tqr.tsc.queryTables(*tqr.NextLink, headers, tqr.ml)
-}
-
-// TableQueryResult contains the response from
-// QueryTables and QueryTablesNextResults functions.
-type TableQueryResult struct {
- OdataMetadata string `json:"odata.metadata"`
- Tables []Table `json:"value"`
- QueryNextLink
- tsc *TableServiceClient
-}
-
-func (t *TableServiceClient) queryTables(uri string, headers map[string]string, ml MetadataLevel) (*TableQueryResult, error) {
- if ml == EmptyPayload {
- return nil, errEmptyPayload
- }
- headers = mergeHeaders(headers, t.client.getStandardHeaders())
- headers[headerAccept] = string(ml)
-
- resp, err := t.client.exec(http.MethodGet, uri, headers, nil, t.auth)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
-
- if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
- return nil, err
- }
-
- respBody, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return nil, err
- }
- var out TableQueryResult
- err = json.Unmarshal(respBody, &out)
- if err != nil {
- return nil, err
- }
-
- for i := range out.Tables {
- out.Tables[i].tsc = t
- }
- out.tsc = t
-
- nextLink := resp.Header.Get(http.CanonicalHeaderKey(headerXmsContinuation))
- if nextLink == "" {
- out.NextLink = nil
- } else {
- originalURI, err := url.Parse(uri)
- if err != nil {
- return nil, err
- }
- v := originalURI.Query()
- v.Set(nextTableQueryParameter, nextLink)
- newURI := t.client.getEndpoint(tableServiceName, tablesURIPath, v)
- out.NextLink = &newURI
- out.ml = ml
- }
-
- return &out, nil
-}
-
-func addBodyRelatedHeaders(h map[string]string, length int) map[string]string {
- h[headerContentType] = "application/json"
- h[headerContentLength] = fmt.Sprintf("%v", length)
- h[headerAcceptCharset] = "UTF-8"
- return h
-}
-
-func addReturnContentHeaders(h map[string]string, ml MetadataLevel) map[string]string {
- if ml != EmptyPayload {
- h[headerPrefer] = "return-content"
- h[headerAccept] = string(ml)
- } else {
- h[headerPrefer] = "return-no-content"
- // From API version 2015-12-11 onwards, Accept header is required
- h[headerAccept] = string(NoMetadata)
- }
- return h
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go
deleted file mode 100644
index 47a871991d..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go
+++ /dev/null
@@ -1,249 +0,0 @@
-package storage
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-
-import (
- "bytes"
- "crypto/hmac"
- "crypto/rand"
- "crypto/sha256"
- "encoding/base64"
- "encoding/xml"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "reflect"
- "strconv"
- "strings"
- "time"
-
- "github.com/gofrs/uuid"
-)
-
-var (
- fixedTime = time.Date(2050, time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6))
- accountSASOptions = AccountSASTokenOptions{
- Services: Services{
- Blob: true,
- },
- ResourceTypes: ResourceTypes{
- Service: true,
- Container: true,
- Object: true,
- },
- Permissions: Permissions{
- Read: true,
- Write: true,
- Delete: true,
- List: true,
- Add: true,
- Create: true,
- Update: true,
- Process: true,
- },
- Expiry: fixedTime,
- UseHTTPS: true,
- }
-)
-
-func (c Client) computeHmac256(message string) string {
- h := hmac.New(sha256.New, c.accountKey)
- h.Write([]byte(message))
- return base64.StdEncoding.EncodeToString(h.Sum(nil))
-}
-
-func currentTimeRfc1123Formatted() string {
- return timeRfc1123Formatted(time.Now().UTC())
-}
-
-func timeRfc1123Formatted(t time.Time) string {
- return t.Format(http.TimeFormat)
-}
-
-func timeRFC3339Formatted(t time.Time) string {
- return t.Format("2006-01-02T15:04:05.0000000Z")
-}
-
-func mergeParams(v1, v2 url.Values) url.Values {
- out := url.Values{}
- for k, v := range v1 {
- out[k] = v
- }
- for k, v := range v2 {
- vals, ok := out[k]
- if ok {
- vals = append(vals, v...)
- out[k] = vals
- } else {
- out[k] = v
- }
- }
- return out
-}
-
-func prepareBlockListRequest(blocks []Block) string {
- s := ``
- for _, v := range blocks {
- s += fmt.Sprintf("<%s>%s%s>", v.Status, v.ID, v.Status)
- }
- s += ``
- return s
-}
-
-func xmlUnmarshal(body io.Reader, v interface{}) error {
- data, err := ioutil.ReadAll(body)
- if err != nil {
- return err
- }
- return xml.Unmarshal(data, v)
-}
-
-func xmlMarshal(v interface{}) (io.Reader, int, error) {
- b, err := xml.Marshal(v)
- if err != nil {
- return nil, 0, err
- }
- return bytes.NewReader(b), len(b), nil
-}
-
-func headersFromStruct(v interface{}) map[string]string {
- headers := make(map[string]string)
- value := reflect.ValueOf(v)
- for i := 0; i < value.NumField(); i++ {
- key := value.Type().Field(i).Tag.Get("header")
- if key != "" {
- reflectedValue := reflect.Indirect(value.Field(i))
- var val string
- if reflectedValue.IsValid() {
- switch reflectedValue.Type() {
- case reflect.TypeOf(fixedTime):
- val = timeRfc1123Formatted(reflectedValue.Interface().(time.Time))
- case reflect.TypeOf(uint64(0)), reflect.TypeOf(uint(0)):
- val = strconv.FormatUint(reflectedValue.Uint(), 10)
- case reflect.TypeOf(int(0)):
- val = strconv.FormatInt(reflectedValue.Int(), 10)
- default:
- val = reflectedValue.String()
- }
- }
- if val != "" {
- headers[key] = val
- }
- }
- }
- return headers
-}
-
-// merges extraHeaders into headers and returns headers
-func mergeHeaders(headers, extraHeaders map[string]string) map[string]string {
- for k, v := range extraHeaders {
- headers[k] = v
- }
- return headers
-}
-
-func addToHeaders(h map[string]string, key, value string) map[string]string {
- if value != "" {
- h[key] = value
- }
- return h
-}
-
-func addTimeToHeaders(h map[string]string, key string, value *time.Time) map[string]string {
- if value != nil {
- h = addToHeaders(h, key, timeRfc1123Formatted(*value))
- }
- return h
-}
-
-func addTimeout(params url.Values, timeout uint) url.Values {
- if timeout > 0 {
- params.Add("timeout", fmt.Sprintf("%v", timeout))
- }
- return params
-}
-
-func addSnapshot(params url.Values, snapshot *time.Time) url.Values {
- if snapshot != nil {
- params.Add("snapshot", timeRFC3339Formatted(*snapshot))
- }
- return params
-}
-
-func getTimeFromHeaders(h http.Header, key string) (*time.Time, error) {
- var out time.Time
- var err error
- outStr := h.Get(key)
- if outStr != "" {
- out, err = time.Parse(time.RFC1123, outStr)
- if err != nil {
- return nil, err
- }
- }
- return &out, nil
-}
-
-// TimeRFC1123 is an alias for time.Time needed for custom Unmarshalling
-type TimeRFC1123 time.Time
-
-// UnmarshalXML is a custom unmarshaller that overrides the default time unmarshal which uses a different time layout.
-func (t *TimeRFC1123) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
- var value string
- d.DecodeElement(&value, &start)
- parse, err := time.Parse(time.RFC1123, value)
- if err != nil {
- return err
- }
- *t = TimeRFC1123(parse)
- return nil
-}
-
-// MarshalXML marshals using time.RFC1123.
-func (t *TimeRFC1123) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
- return e.EncodeElement(time.Time(*t).Format(time.RFC1123), start)
-}
-
-// returns a map of custom metadata values from the specified HTTP header
-func getMetadataFromHeaders(header http.Header) map[string]string {
- metadata := make(map[string]string)
- for k, v := range header {
- // Can't trust CanonicalHeaderKey() to munge case
- // reliably. "_" is allowed in identifiers:
- // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
- // https://msdn.microsoft.com/library/aa664670(VS.71).aspx
- // http://tools.ietf.org/html/rfc7230#section-3.2
- // ...but "_" is considered invalid by
- // CanonicalMIMEHeaderKey in
- // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
- // so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl".
- k = strings.ToLower(k)
- if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) {
- continue
- }
- // metadata["lol"] = content of the last X-Ms-Meta-Lol header
- k = k[len(userDefinedMetadataHeaderPrefix):]
- metadata[k] = v[len(v)-1]
- }
-
- if len(metadata) == 0 {
- return nil
- }
-
- return metadata
-}
-
-// newUUID returns a new uuid using RFC 4122 algorithm.
-func newUUID() (uuid.UUID, error) {
- u := [16]byte{}
- // Set all bits to randomly (or pseudo-randomly) chosen values.
- _, err := rand.Read(u[:])
- if err != nil {
- return uuid.UUID{}, err
- }
- u[8] = (u[8]&(0xff>>2) | (0x02 << 6)) // u.setVariant(ReservedRFC4122)
- u[6] = (u[6] & 0xF) | (uuid.V4 << 4) // u.setVersion(V4)
- return uuid.FromBytes(u[:])
-}
diff --git a/vendor/github.com/gofrs/uuid/.gitignore b/vendor/github.com/gofrs/uuid/.gitignore
deleted file mode 100644
index 666dbbb5bc..0000000000
--- a/vendor/github.com/gofrs/uuid/.gitignore
+++ /dev/null
@@ -1,15 +0,0 @@
-# Binaries for programs and plugins
-*.exe
-*.exe~
-*.dll
-*.so
-*.dylib
-
-# Test binary, build with `go test -c`
-*.test
-
-# Output of the go coverage tool, specifically when used with LiteIDE
-*.out
-
-# binary bundle generated by go-fuzz
-uuid-fuzz.zip
diff --git a/vendor/github.com/gofrs/uuid/LICENSE b/vendor/github.com/gofrs/uuid/LICENSE
deleted file mode 100644
index 926d549870..0000000000
--- a/vendor/github.com/gofrs/uuid/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (C) 2013-2018 by Maxim Bublis
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/gofrs/uuid/README.md b/vendor/github.com/gofrs/uuid/README.md
deleted file mode 100644
index 4f73bec82c..0000000000
--- a/vendor/github.com/gofrs/uuid/README.md
+++ /dev/null
@@ -1,117 +0,0 @@
-# UUID
-
-[![License](https://img.shields.io/github/license/gofrs/uuid.svg)](https://github.com/gofrs/uuid/blob/master/LICENSE)
-[![Build Status](https://travis-ci.org/gofrs/uuid.svg?branch=master)](https://travis-ci.org/gofrs/uuid)
-[![GoDoc](http://godoc.org/github.com/gofrs/uuid?status.svg)](http://godoc.org/github.com/gofrs/uuid)
-[![Coverage Status](https://codecov.io/gh/gofrs/uuid/branch/master/graphs/badge.svg?branch=master)](https://codecov.io/gh/gofrs/uuid/)
-[![Go Report Card](https://goreportcard.com/badge/github.com/gofrs/uuid)](https://goreportcard.com/report/github.com/gofrs/uuid)
-
-Package uuid provides a pure Go implementation of Universally Unique Identifiers
-(UUID) variant as defined in RFC-4122. This package supports both the creation
-and parsing of UUIDs in different formats.
-
-This package supports the following UUID versions:
-* Version 1, based on timestamp and MAC address (RFC-4122)
-* Version 3, based on MD5 hashing of a named value (RFC-4122)
-* Version 4, based on random numbers (RFC-4122)
-* Version 5, based on SHA-1 hashing of a named value (RFC-4122)
-
-This package also supports experimental Universally Unique Identifier implementations based on a
-[draft RFC](https://www.ietf.org/archive/id/draft-peabody-dispatch-new-uuid-format-04.html) that updates RFC-4122
-* Version 6, a k-sortable id based on timestamp, and field-compatible with v1 (draft-peabody-dispatch-new-uuid-format, RFC-4122)
-* Version 7, a k-sortable id based on timestamp (draft-peabody-dispatch-new-uuid-format, RFC-4122)
-
-The v6 and v7 IDs are **not** considered a part of the stable API, and may be subject to behavior or API changes as part of minor releases
-to this package. They will be updated as the draft RFC changes, and will become stable if and when the draft RFC is accepted.
-
-## Project History
-
-This project was originally forked from the
-[github.com/satori/go.uuid](https://github.com/satori/go.uuid) repository after
-it appeared to be no longer maintained, while exhibiting [critical
-flaws](https://github.com/satori/go.uuid/issues/73). We have decided to take
-over this project to ensure it receives regular maintenance for the benefit of
-the larger Go community.
-
-We'd like to thank Maxim Bublis for his hard work on the original iteration of
-the package.
-
-## License
-
-This source code of this package is released under the MIT License. Please see
-the [LICENSE](https://github.com/gofrs/uuid/blob/master/LICENSE) for the full
-content of the license.
-
-## Recommended Package Version
-
-We recommend using v2.0.0+ of this package, as versions prior to 2.0.0 were
-created before our fork of the original package and have some known
-deficiencies.
-
-## Installation
-
-It is recommended to use a package manager like `dep` that understands tagged
-releases of a package, as well as semantic versioning.
-
-If you are unable to make use of a dependency manager with your project, you can
-use the `go get` command to download it directly:
-
-```Shell
-$ go get github.com/gofrs/uuid
-```
-
-## Requirements
-
-Due to subtests not being supported in older versions of Go, this package is
-only regularly tested against Go 1.7+. This package may work perfectly fine with
-Go 1.2+, but support for these older versions is not actively maintained.
-
-## Go 1.11 Modules
-
-As of v3.2.0, this repository no longer adopts Go modules, and v3.2.0 no longer has a `go.mod` file. As a result, v3.2.0 also drops support for the `github.com/gofrs/uuid/v3` import path. Only module-based consumers are impacted. With the v3.2.0 release, _all_ gofrs/uuid consumers should use the `github.com/gofrs/uuid` import path.
-
-An existing module-based consumer will continue to be able to build using the `github.com/gofrs/uuid/v3` import path using any valid consumer `go.mod` that worked prior to the publishing of v3.2.0, but any module-based consumer should start using the `github.com/gofrs/uuid` import path when possible and _must_ use the `github.com/gofrs/uuid` import path prior to upgrading to v3.2.0.
-
-Please refer to [Issue #61](https://github.com/gofrs/uuid/issues/61) and [Issue #66](https://github.com/gofrs/uuid/issues/66) for more details.
-
-## Usage
-
-Here is a quick overview of how to use this package. For more detailed
-documentation, please see the [GoDoc Page](http://godoc.org/github.com/gofrs/uuid).
-
-```go
-package main
-
-import (
- "log"
-
- "github.com/gofrs/uuid"
-)
-
-// Create a Version 4 UUID, panicking on error.
-// Use this form to initialize package-level variables.
-var u1 = uuid.Must(uuid.NewV4())
-
-func main() {
- // Create a Version 4 UUID.
- u2, err := uuid.NewV4()
- if err != nil {
- log.Fatalf("failed to generate UUID: %v", err)
- }
- log.Printf("generated Version 4 UUID %v", u2)
-
- // Parse a UUID from a string.
- s := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
- u3, err := uuid.FromString(s)
- if err != nil {
- log.Fatalf("failed to parse UUID %q: %v", s, err)
- }
- log.Printf("successfully parsed UUID %v", u3)
-}
-```
-
-## References
-
-* [RFC-4122](https://tools.ietf.org/html/rfc4122)
-* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01)
-* [New UUID Formats RFC Draft (Peabody) Rev 04](https://www.ietf.org/archive/id/draft-peabody-dispatch-new-uuid-format-04.html#)
diff --git a/vendor/github.com/gofrs/uuid/codec.go b/vendor/github.com/gofrs/uuid/codec.go
deleted file mode 100644
index 665026414c..0000000000
--- a/vendor/github.com/gofrs/uuid/codec.go
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright (C) 2013-2018 by Maxim Bublis
-//
-// Permission is hereby granted, free of charge, to any person obtaining
-// a copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to
-// permit persons to whom the Software is furnished to do so, subject to
-// the following conditions:
-//
-// The above copyright notice and this permission notice shall be
-// included in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-package uuid
-
-import (
- "errors"
- "fmt"
-)
-
-// FromBytes returns a UUID generated from the raw byte slice input.
-// It will return an error if the slice isn't 16 bytes long.
-func FromBytes(input []byte) (UUID, error) {
- u := UUID{}
- err := u.UnmarshalBinary(input)
- return u, err
-}
-
-// FromBytesOrNil returns a UUID generated from the raw byte slice input.
-// Same behavior as FromBytes(), but returns uuid.Nil instead of an error.
-func FromBytesOrNil(input []byte) UUID {
- uuid, err := FromBytes(input)
- if err != nil {
- return Nil
- }
- return uuid
-}
-
-var errInvalidFormat = errors.New("uuid: invalid UUID format")
-
-func fromHexChar(c byte) byte {
- switch {
- case '0' <= c && c <= '9':
- return c - '0'
- case 'a' <= c && c <= 'f':
- return c - 'a' + 10
- case 'A' <= c && c <= 'F':
- return c - 'A' + 10
- }
- return 255
-}
-
-// Parse parses the UUID stored in the string text. Parsing and supported
-// formats are the same as UnmarshalText.
-func (u *UUID) Parse(s string) error {
- switch len(s) {
- case 32: // hash
- case 36: // canonical
- case 34, 38:
- if s[0] != '{' || s[len(s)-1] != '}' {
- return fmt.Errorf("uuid: incorrect UUID format in string %q", s)
- }
- s = s[1 : len(s)-1]
- case 41, 45:
- if s[:9] != "urn:uuid:" {
- return fmt.Errorf("uuid: incorrect UUID format in string %q", s[:9])
- }
- s = s[9:]
- default:
- return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(s), s)
- }
- // canonical
- if len(s) == 36 {
- if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
- return fmt.Errorf("uuid: incorrect UUID format in string %q", s)
- }
- for i, x := range [16]byte{
- 0, 2, 4, 6,
- 9, 11,
- 14, 16,
- 19, 21,
- 24, 26, 28, 30, 32, 34,
- } {
- v1 := fromHexChar(s[x])
- v2 := fromHexChar(s[x+1])
- if v1|v2 == 255 {
- return errInvalidFormat
- }
- u[i] = (v1 << 4) | v2
- }
- return nil
- }
- // hash like
- for i := 0; i < 32; i += 2 {
- v1 := fromHexChar(s[i])
- v2 := fromHexChar(s[i+1])
- if v1|v2 == 255 {
- return errInvalidFormat
- }
- u[i/2] = (v1 << 4) | v2
- }
- return nil
-}
-
-// FromString returns a UUID parsed from the input string.
-// Input is expected in a form accepted by UnmarshalText.
-func FromString(text string) (UUID, error) {
- var u UUID
- err := u.Parse(text)
- return u, err
-}
-
-// FromStringOrNil returns a UUID parsed from the input string.
-// Same behavior as FromString(), but returns uuid.Nil instead of an error.
-func FromStringOrNil(input string) UUID {
- uuid, err := FromString(input)
- if err != nil {
- return Nil
- }
- return uuid
-}
-
-// MarshalText implements the encoding.TextMarshaler interface.
-// The encoding is the same as returned by the String() method.
-func (u UUID) MarshalText() ([]byte, error) {
- var buf [36]byte
- encodeCanonical(buf[:], u)
- return buf[:], nil
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-// Following formats are supported:
-//
-// "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
-// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
-// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
-// "6ba7b8109dad11d180b400c04fd430c8"
-// "{6ba7b8109dad11d180b400c04fd430c8}",
-// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8"
-//
-// ABNF for supported UUID text representation follows:
-//
-// URN := 'urn'
-// UUID-NID := 'uuid'
-//
-// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' |
-// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' |
-// 'A' | 'B' | 'C' | 'D' | 'E' | 'F'
-//
-// hexoct := hexdig hexdig
-// 2hexoct := hexoct hexoct
-// 4hexoct := 2hexoct 2hexoct
-// 6hexoct := 4hexoct 2hexoct
-// 12hexoct := 6hexoct 6hexoct
-//
-// hashlike := 12hexoct
-// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct
-//
-// plain := canonical | hashlike
-// uuid := canonical | hashlike | braced | urn
-//
-// braced := '{' plain '}' | '{' hashlike '}'
-// urn := URN ':' UUID-NID ':' plain
-func (u *UUID) UnmarshalText(b []byte) error {
- switch len(b) {
- case 32: // hash
- case 36: // canonical
- case 34, 38:
- if b[0] != '{' || b[len(b)-1] != '}' {
- return fmt.Errorf("uuid: incorrect UUID format in string %q", b)
- }
- b = b[1 : len(b)-1]
- case 41, 45:
- if string(b[:9]) != "urn:uuid:" {
- return fmt.Errorf("uuid: incorrect UUID format in string %q", b[:9])
- }
- b = b[9:]
- default:
- return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(b), b)
- }
- if len(b) == 36 {
- if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
- return fmt.Errorf("uuid: incorrect UUID format in string %q", b)
- }
- for i, x := range [16]byte{
- 0, 2, 4, 6,
- 9, 11,
- 14, 16,
- 19, 21,
- 24, 26, 28, 30, 32, 34,
- } {
- v1 := fromHexChar(b[x])
- v2 := fromHexChar(b[x+1])
- if v1|v2 == 255 {
- return errInvalidFormat
- }
- u[i] = (v1 << 4) | v2
- }
- return nil
- }
- for i := 0; i < 32; i += 2 {
- v1 := fromHexChar(b[i])
- v2 := fromHexChar(b[i+1])
- if v1|v2 == 255 {
- return errInvalidFormat
- }
- u[i/2] = (v1 << 4) | v2
- }
- return nil
-}
-
-// MarshalBinary implements the encoding.BinaryMarshaler interface.
-func (u UUID) MarshalBinary() ([]byte, error) {
- return u.Bytes(), nil
-}
-
-// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
-// It will return an error if the slice isn't 16 bytes long.
-func (u *UUID) UnmarshalBinary(data []byte) error {
- if len(data) != Size {
- return fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data))
- }
- copy(u[:], data)
-
- return nil
-}
diff --git a/vendor/github.com/gofrs/uuid/fuzz.go b/vendor/github.com/gofrs/uuid/fuzz.go
deleted file mode 100644
index ccf8d4ca29..0000000000
--- a/vendor/github.com/gofrs/uuid/fuzz.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (c) 2018 Andrei Tudor Călin
-//
-// Permission is hereby granted, free of charge, to any person obtaining
-// a copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to
-// permit persons to whom the Software is furnished to do so, subject to
-// the following conditions:
-//
-// The above copyright notice and this permission notice shall be
-// included in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-//go:build gofuzz
-// +build gofuzz
-
-package uuid
-
-// Fuzz implements a simple fuzz test for FromString / UnmarshalText.
-//
-// To run:
-//
-// $ go get github.com/dvyukov/go-fuzz/...
-// $ cd $GOPATH/src/github.com/gofrs/uuid
-// $ go-fuzz-build github.com/gofrs/uuid
-// $ go-fuzz -bin=uuid-fuzz.zip -workdir=./testdata
-//
-// If you make significant changes to FromString / UnmarshalText and add
-// new cases to fromStringTests (in codec_test.go), please run
-//
-// $ go test -seed_fuzz_corpus
-//
-// to seed the corpus with the new interesting inputs, then run the fuzzer.
-func Fuzz(data []byte) int {
- _, err := FromString(string(data))
- if err != nil {
- return 0
- }
- return 1
-}
diff --git a/vendor/github.com/gofrs/uuid/generator.go b/vendor/github.com/gofrs/uuid/generator.go
deleted file mode 100644
index 44be9e1585..0000000000
--- a/vendor/github.com/gofrs/uuid/generator.go
+++ /dev/null
@@ -1,456 +0,0 @@
-// Copyright (C) 2013-2018 by Maxim Bublis
-//
-// Permission is hereby granted, free of charge, to any person obtaining
-// a copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to
-// permit persons to whom the Software is furnished to do so, subject to
-// the following conditions:
-//
-// The above copyright notice and this permission notice shall be
-// included in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-package uuid
-
-import (
- "crypto/md5"
- "crypto/rand"
- "crypto/sha1"
- "encoding/binary"
- "fmt"
- "hash"
- "io"
- "net"
- "sync"
- "time"
-)
-
-// Difference in 100-nanosecond intervals between
-// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970).
-const epochStart = 122192928000000000
-
-// EpochFunc is the function type used to provide the current time.
-type EpochFunc func() time.Time
-
-// HWAddrFunc is the function type used to provide hardware (MAC) addresses.
-type HWAddrFunc func() (net.HardwareAddr, error)
-
-// DefaultGenerator is the default UUID Generator used by this package.
-var DefaultGenerator Generator = NewGen()
-
-// NewV1 returns a UUID based on the current timestamp and MAC address.
-func NewV1() (UUID, error) {
- return DefaultGenerator.NewV1()
-}
-
-// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name.
-func NewV3(ns UUID, name string) UUID {
- return DefaultGenerator.NewV3(ns, name)
-}
-
-// NewV4 returns a randomly generated UUID.
-func NewV4() (UUID, error) {
- return DefaultGenerator.NewV4()
-}
-
-// NewV5 returns a UUID based on SHA-1 hash of the namespace UUID and name.
-func NewV5(ns UUID, name string) UUID {
- return DefaultGenerator.NewV5(ns, name)
-}
-
-// NewV6 returns a k-sortable UUID based on a timestamp and 48 bits of
-// pseudorandom data. The timestamp in a V6 UUID is the same as V1, with the bit
-// order being adjusted to allow the UUID to be k-sortable.
-//
-// This is implemented based on revision 03 of the Peabody UUID draft, and may
-// be subject to change pending further revisions. Until the final specification
-// revision is finished, changes required to implement updates to the spec will
-// not be considered a breaking change. They will happen as a minor version
-// releases until the spec is final.
-func NewV6() (UUID, error) {
- return DefaultGenerator.NewV6()
-}
-
-// NewV7 returns a k-sortable UUID based on the current millisecond precision
-// UNIX epoch and 74 bits of pseudorandom data. It supports single-node batch generation (multiple UUIDs in the same timestamp) with a Monotonic Random counter.
-//
-// This is implemented based on revision 04 of the Peabody UUID draft, and may
-// be subject to change pending further revisions. Until the final specification
-// revision is finished, changes required to implement updates to the spec will
-// not be considered a breaking change. They will happen as a minor version
-// releases until the spec is final.
-func NewV7() (UUID, error) {
- return DefaultGenerator.NewV7()
-}
-
-// Generator provides an interface for generating UUIDs.
-type Generator interface {
- NewV1() (UUID, error)
- NewV3(ns UUID, name string) UUID
- NewV4() (UUID, error)
- NewV5(ns UUID, name string) UUID
- NewV6() (UUID, error)
- NewV7() (UUID, error)
-}
-
-// Gen is a reference UUID generator based on the specifications laid out in
-// RFC-4122 and DCE 1.1: Authentication and Security Services. This type
-// satisfies the Generator interface as defined in this package.
-//
-// For consumers who are generating V1 UUIDs, but don't want to expose the MAC
-// address of the node generating the UUIDs, the NewGenWithHWAF() function has been
-// provided as a convenience. See the function's documentation for more info.
-//
-// The authors of this package do not feel that the majority of users will need
-// to obfuscate their MAC address, and so we recommend using NewGen() to create
-// a new generator.
-type Gen struct {
- clockSequenceOnce sync.Once
- hardwareAddrOnce sync.Once
- storageMutex sync.Mutex
-
- rand io.Reader
-
- epochFunc EpochFunc
- hwAddrFunc HWAddrFunc
- lastTime uint64
- clockSequence uint16
- hardwareAddr [6]byte
-}
-
-// GenOption is a function type that can be used to configure a Gen generator.
-type GenOption func(*Gen)
-
-// interface check -- build will fail if *Gen doesn't satisfy Generator
-var _ Generator = (*Gen)(nil)
-
-// NewGen returns a new instance of Gen with some default values set. Most
-// people should use this.
-func NewGen() *Gen {
- return NewGenWithHWAF(defaultHWAddrFunc)
-}
-
-// NewGenWithHWAF builds a new UUID generator with the HWAddrFunc provided. Most
-// consumers should use NewGen() instead.
-//
-// This is used so that consumers can generate their own MAC addresses, for use
-// in the generated UUIDs, if there is some concern about exposing the physical
-// address of the machine generating the UUID.
-//
-// The Gen generator will only invoke the HWAddrFunc once, and cache that MAC
-// address for all the future UUIDs generated by it. If you'd like to switch the
-// MAC address being used, you'll need to create a new generator using this
-// function.
-func NewGenWithHWAF(hwaf HWAddrFunc) *Gen {
- return NewGenWithOptions(WithHWAddrFunc(hwaf))
-}
-
-// NewGenWithOptions returns a new instance of Gen with the options provided.
-// Most people should use NewGen() or NewGenWithHWAF() instead.
-//
-// To customize the generator, you can pass in one or more GenOption functions.
-// For example:
-//
-// gen := NewGenWithOptions(
-// WithHWAddrFunc(myHWAddrFunc),
-// WithEpochFunc(myEpochFunc),
-// WithRandomReader(myRandomReader),
-// )
-//
-// NewGenWithOptions(WithHWAddrFunc(myHWAddrFunc)) is equivalent to calling
-// NewGenWithHWAF(myHWAddrFunc)
-// NewGenWithOptions() is equivalent to calling NewGen()
-func NewGenWithOptions(opts ...GenOption) *Gen {
- gen := &Gen{
- epochFunc: time.Now,
- hwAddrFunc: defaultHWAddrFunc,
- rand: rand.Reader,
- }
-
- for _, opt := range opts {
- opt(gen)
- }
-
- return gen
-}
-
-// WithHWAddrFunc is a GenOption that allows you to provide your own HWAddrFunc
-// function.
-// When this option is nil, the defaultHWAddrFunc is used.
-func WithHWAddrFunc(hwaf HWAddrFunc) GenOption {
- return func(gen *Gen) {
- if hwaf == nil {
- hwaf = defaultHWAddrFunc
- }
-
- gen.hwAddrFunc = hwaf
- }
-}
-
-// WithEpochFunc is a GenOption that allows you to provide your own EpochFunc
-// function.
-// When this option is nil, time.Now is used.
-func WithEpochFunc(epochf EpochFunc) GenOption {
- return func(gen *Gen) {
- if epochf == nil {
- epochf = time.Now
- }
-
- gen.epochFunc = epochf
- }
-}
-
-// WithRandomReader is a GenOption that allows you to provide your own random
-// reader.
-// When this option is nil, the default rand.Reader is used.
-func WithRandomReader(reader io.Reader) GenOption {
- return func(gen *Gen) {
- if reader == nil {
- reader = rand.Reader
- }
-
- gen.rand = reader
- }
-}
-
-// NewV1 returns a UUID based on the current timestamp and MAC address.
-func (g *Gen) NewV1() (UUID, error) {
- u := UUID{}
-
- timeNow, clockSeq, err := g.getClockSequence(false)
- if err != nil {
- return Nil, err
- }
- binary.BigEndian.PutUint32(u[0:], uint32(timeNow))
- binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
- binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
- binary.BigEndian.PutUint16(u[8:], clockSeq)
-
- hardwareAddr, err := g.getHardwareAddr()
- if err != nil {
- return Nil, err
- }
- copy(u[10:], hardwareAddr)
-
- u.SetVersion(V1)
- u.SetVariant(VariantRFC4122)
-
- return u, nil
-}
-
-// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name.
-func (g *Gen) NewV3(ns UUID, name string) UUID {
- u := newFromHash(md5.New(), ns, name)
- u.SetVersion(V3)
- u.SetVariant(VariantRFC4122)
-
- return u
-}
-
-// NewV4 returns a randomly generated UUID.
-func (g *Gen) NewV4() (UUID, error) {
- u := UUID{}
- if _, err := io.ReadFull(g.rand, u[:]); err != nil {
- return Nil, err
- }
- u.SetVersion(V4)
- u.SetVariant(VariantRFC4122)
-
- return u, nil
-}
-
-// NewV5 returns a UUID based on SHA-1 hash of the namespace UUID and name.
-func (g *Gen) NewV5(ns UUID, name string) UUID {
- u := newFromHash(sha1.New(), ns, name)
- u.SetVersion(V5)
- u.SetVariant(VariantRFC4122)
-
- return u
-}
-
-// NewV6 returns a k-sortable UUID based on a timestamp and 48 bits of
-// pseudorandom data. The timestamp in a V6 UUID is the same as V1, with the bit
-// order being adjusted to allow the UUID to be k-sortable.
-//
-// This is implemented based on revision 03 of the Peabody UUID draft, and may
-// be subject to change pending further revisions. Until the final specification
-// revision is finished, changes required to implement updates to the spec will
-// not be considered a breaking change. They will happen as a minor version
-// releases until the spec is final.
-func (g *Gen) NewV6() (UUID, error) {
- var u UUID
-
- if _, err := io.ReadFull(g.rand, u[10:]); err != nil {
- return Nil, err
- }
-
- timeNow, clockSeq, err := g.getClockSequence(false)
- if err != nil {
- return Nil, err
- }
-
- binary.BigEndian.PutUint32(u[0:], uint32(timeNow>>28)) // set time_high
- binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>12)) // set time_mid
- binary.BigEndian.PutUint16(u[6:], uint16(timeNow&0xfff)) // set time_low (minus four version bits)
- binary.BigEndian.PutUint16(u[8:], clockSeq&0x3fff) // set clk_seq_hi_res (minus two variant bits)
-
- u.SetVersion(V6)
- u.SetVariant(VariantRFC4122)
-
- return u, nil
-}
-
-// getClockSequence returns the epoch and clock sequence for V1,V6 and V7 UUIDs.
-//
-// When useUnixTSMs is false, it uses the Coordinated Universal Time (UTC) as a count of 100-
-//
-// nanosecond intervals since 00:00:00.00, 15 October 1582 (the date of Gregorian reform to the Christian calendar).
-func (g *Gen) getClockSequence(useUnixTSMs bool) (uint64, uint16, error) {
- var err error
- g.clockSequenceOnce.Do(func() {
- buf := make([]byte, 2)
- if _, err = io.ReadFull(g.rand, buf); err != nil {
- return
- }
- g.clockSequence = binary.BigEndian.Uint16(buf)
- })
- if err != nil {
- return 0, 0, err
- }
-
- g.storageMutex.Lock()
- defer g.storageMutex.Unlock()
-
- var timeNow uint64
- if useUnixTSMs {
- timeNow = uint64(g.epochFunc().UnixMilli())
- } else {
- timeNow = g.getEpoch()
- }
- // Clock didn't change since last UUID generation.
- // Should increase clock sequence.
- if timeNow <= g.lastTime {
- g.clockSequence++
- }
- g.lastTime = timeNow
-
- return timeNow, g.clockSequence, nil
-}
-
-// NewV7 returns a k-sortable UUID based on the current millisecond precision
-// UNIX epoch and 74 bits of pseudorandom data.
-//
-// This is implemented based on revision 04 of the Peabody UUID draft, and may
-// be subject to change pending further revisions. Until the final specification
-// revision is finished, changes required to implement updates to the spec will
-// not be considered a breaking change. They will happen as a minor version
-// releases until the spec is final.
-func (g *Gen) NewV7() (UUID, error) {
- var u UUID
- /* https://www.ietf.org/archive/id/draft-peabody-dispatch-new-uuid-format-04.html#name-uuid-version-7
- 0 1 2 3
- 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | unix_ts_ms |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | unix_ts_ms | ver | rand_a |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- |var| rand_b |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | rand_b |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */
-
- ms, clockSeq, err := g.getClockSequence(true)
- if err != nil {
- return Nil, err
- }
- //UUIDv7 features a 48 bit timestamp. First 32bit (4bytes) represents seconds since 1970, followed by 2 bytes for the ms granularity.
- u[0] = byte(ms >> 40) //1-6 bytes: big-endian unsigned number of Unix epoch timestamp
- u[1] = byte(ms >> 32)
- u[2] = byte(ms >> 24)
- u[3] = byte(ms >> 16)
- u[4] = byte(ms >> 8)
- u[5] = byte(ms)
-
- //support batching by using a monotonic pseudo-random sequence
- //The 6th byte contains the version and partially rand_a data.
- //We will lose the most significant bites from the clockSeq (with SetVersion), but it is ok, we need the least significant that contains the counter to ensure the monotonic property
- binary.BigEndian.PutUint16(u[6:8], clockSeq) // set rand_a with clock seq which is random and monotonic
-
- //override first 4bits of u[6].
- u.SetVersion(V7)
-
- //set rand_b 64bits of pseudo-random bits (first 2 will be overridden)
- if _, err = io.ReadFull(g.rand, u[8:16]); err != nil {
- return Nil, err
- }
- //override first 2 bits of byte[8] for the variant
- u.SetVariant(VariantRFC4122)
-
- return u, nil
-}
-
-// Returns the hardware address.
-func (g *Gen) getHardwareAddr() ([]byte, error) {
- var err error
- g.hardwareAddrOnce.Do(func() {
- var hwAddr net.HardwareAddr
- if hwAddr, err = g.hwAddrFunc(); err == nil {
- copy(g.hardwareAddr[:], hwAddr)
- return
- }
-
- // Initialize hardwareAddr randomly in case
- // of real network interfaces absence.
- if _, err = io.ReadFull(g.rand, g.hardwareAddr[:]); err != nil {
- return
- }
- // Set multicast bit as recommended by RFC-4122
- g.hardwareAddr[0] |= 0x01
- })
- if err != nil {
- return []byte{}, err
- }
- return g.hardwareAddr[:], nil
-}
-
-// Returns the difference between UUID epoch (October 15, 1582)
-// and current time in 100-nanosecond intervals.
-func (g *Gen) getEpoch() uint64 {
- return epochStart + uint64(g.epochFunc().UnixNano()/100)
-}
-
-// Returns the UUID based on the hashing of the namespace UUID and name.
-func newFromHash(h hash.Hash, ns UUID, name string) UUID {
- u := UUID{}
- h.Write(ns[:])
- h.Write([]byte(name))
- copy(u[:], h.Sum(nil))
-
- return u
-}
-
-var netInterfaces = net.Interfaces
-
-// Returns the hardware address.
-func defaultHWAddrFunc() (net.HardwareAddr, error) {
- ifaces, err := netInterfaces()
- if err != nil {
- return []byte{}, err
- }
- for _, iface := range ifaces {
- if len(iface.HardwareAddr) >= 6 {
- return iface.HardwareAddr, nil
- }
- }
- return []byte{}, fmt.Errorf("uuid: no HW address found")
-}
diff --git a/vendor/github.com/gofrs/uuid/sql.go b/vendor/github.com/gofrs/uuid/sql.go
deleted file mode 100644
index 01d5d88496..0000000000
--- a/vendor/github.com/gofrs/uuid/sql.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright (C) 2013-2018 by Maxim Bublis
-//
-// Permission is hereby granted, free of charge, to any person obtaining
-// a copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to
-// permit persons to whom the Software is furnished to do so, subject to
-// the following conditions:
-//
-// The above copyright notice and this permission notice shall be
-// included in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-package uuid
-
-import (
- "database/sql"
- "database/sql/driver"
- "fmt"
-)
-
-var _ driver.Valuer = UUID{}
-var _ sql.Scanner = (*UUID)(nil)
-
-// Value implements the driver.Valuer interface.
-func (u UUID) Value() (driver.Value, error) {
- return u.String(), nil
-}
-
-// Scan implements the sql.Scanner interface.
-// A 16-byte slice will be handled by UnmarshalBinary, while
-// a longer byte slice or a string will be handled by UnmarshalText.
-func (u *UUID) Scan(src interface{}) error {
- switch src := src.(type) {
- case UUID: // support gorm convert from UUID to NullUUID
- *u = src
- return nil
-
- case []byte:
- if len(src) == Size {
- return u.UnmarshalBinary(src)
- }
- return u.UnmarshalText(src)
-
- case string:
- uu, err := FromString(src)
- *u = uu
- return err
- }
-
- return fmt.Errorf("uuid: cannot convert %T to UUID", src)
-}
-
-// NullUUID can be used with the standard sql package to represent a
-// UUID value that can be NULL in the database.
-type NullUUID struct {
- UUID UUID
- Valid bool
-}
-
-// Value implements the driver.Valuer interface.
-func (u NullUUID) Value() (driver.Value, error) {
- if !u.Valid {
- return nil, nil
- }
- // Delegate to UUID Value function
- return u.UUID.Value()
-}
-
-// Scan implements the sql.Scanner interface.
-func (u *NullUUID) Scan(src interface{}) error {
- if src == nil {
- u.UUID, u.Valid = Nil, false
- return nil
- }
-
- // Delegate to UUID Scan function
- u.Valid = true
- return u.UUID.Scan(src)
-}
-
-var nullJSON = []byte("null")
-
-// MarshalJSON marshals the NullUUID as null or the nested UUID
-func (u NullUUID) MarshalJSON() ([]byte, error) {
- if !u.Valid {
- return nullJSON, nil
- }
- var buf [38]byte
- buf[0] = '"'
- encodeCanonical(buf[1:37], u.UUID)
- buf[37] = '"'
- return buf[:], nil
-}
-
-// UnmarshalJSON unmarshals a NullUUID
-func (u *NullUUID) UnmarshalJSON(b []byte) error {
- if string(b) == "null" {
- u.UUID, u.Valid = Nil, false
- return nil
- }
- if n := len(b); n >= 2 && b[0] == '"' {
- b = b[1 : n-1]
- }
- err := u.UUID.UnmarshalText(b)
- u.Valid = (err == nil)
- return err
-}
diff --git a/vendor/github.com/gofrs/uuid/uuid.go b/vendor/github.com/gofrs/uuid/uuid.go
deleted file mode 100644
index 5320fb5389..0000000000
--- a/vendor/github.com/gofrs/uuid/uuid.go
+++ /dev/null
@@ -1,285 +0,0 @@
-// Copyright (C) 2013-2018 by Maxim Bublis
-//
-// Permission is hereby granted, free of charge, to any person obtaining
-// a copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to
-// permit persons to whom the Software is furnished to do so, subject to
-// the following conditions:
-//
-// The above copyright notice and this permission notice shall be
-// included in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-// Package uuid provides implementations of the Universally Unique Identifier
-// (UUID), as specified in RFC-4122 and the Peabody RFC Draft (revision 03).
-//
-// RFC-4122[1] provides the specification for versions 1, 3, 4, and 5. The
-// Peabody UUID RFC Draft[2] provides the specification for the new k-sortable
-// UUIDs, versions 6 and 7.
-//
-// DCE 1.1[3] provides the specification for version 2, but version 2 support
-// was removed from this package in v4 due to some concerns with the
-// specification itself. Reading the spec, it seems that it would result in
-// generating UUIDs that aren't very unique. In having read the spec it seemed
-// that our implementation did not meet the spec. It also seems to be at-odds
-// with RFC 4122, meaning we would need quite a bit of special code to support
-// it. Lastly, there were no Version 2 implementations that we could find to
-// ensure we were understanding the specification correctly.
-//
-// [1] https://tools.ietf.org/html/rfc4122
-// [2] https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03
-// [3] http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01
-package uuid
-
-import (
- "encoding/binary"
- "encoding/hex"
- "fmt"
- "time"
-)
-
-// Size of a UUID in bytes.
-const Size = 16
-
-// UUID is an array type to represent the value of a UUID, as defined in RFC-4122.
-type UUID [Size]byte
-
-// UUID versions.
-const (
- _ byte = iota
- V1 // Version 1 (date-time and MAC address)
- _ // Version 2 (date-time and MAC address, DCE security version) [removed]
- V3 // Version 3 (namespace name-based)
- V4 // Version 4 (random)
- V5 // Version 5 (namespace name-based)
- V6 // Version 6 (k-sortable timestamp and random data, field-compatible with v1) [peabody draft]
- V7 // Version 7 (k-sortable timestamp and random data) [peabody draft]
- _ // Version 8 (k-sortable timestamp, meant for custom implementations) [peabody draft] [not implemented]
-)
-
-// UUID layout variants.
-const (
- VariantNCS byte = iota
- VariantRFC4122
- VariantMicrosoft
- VariantFuture
-)
-
-// UUID DCE domains.
-const (
- DomainPerson = iota
- DomainGroup
- DomainOrg
-)
-
-// Timestamp is the count of 100-nanosecond intervals since 00:00:00.00,
-// 15 October 1582 within a V1 UUID. This type has no meaning for other
-// UUID versions since they don't have an embedded timestamp.
-type Timestamp uint64
-
-const _100nsPerSecond = 10000000
-
-// Time returns the UTC time.Time representation of a Timestamp
-func (t Timestamp) Time() (time.Time, error) {
- secs := uint64(t) / _100nsPerSecond
- nsecs := 100 * (uint64(t) % _100nsPerSecond)
-
- return time.Unix(int64(secs)-(epochStart/_100nsPerSecond), int64(nsecs)), nil
-}
-
-// TimestampFromV1 returns the Timestamp embedded within a V1 UUID.
-// Returns an error if the UUID is any version other than 1.
-func TimestampFromV1(u UUID) (Timestamp, error) {
- if u.Version() != 1 {
- err := fmt.Errorf("uuid: %s is version %d, not version 1", u, u.Version())
- return 0, err
- }
-
- low := binary.BigEndian.Uint32(u[0:4])
- mid := binary.BigEndian.Uint16(u[4:6])
- hi := binary.BigEndian.Uint16(u[6:8]) & 0xfff
-
- return Timestamp(uint64(low) + (uint64(mid) << 32) + (uint64(hi) << 48)), nil
-}
-
-// TimestampFromV6 returns the Timestamp embedded within a V6 UUID. This
-// function returns an error if the UUID is any version other than 6.
-//
-// This is implemented based on revision 03 of the Peabody UUID draft, and may
-// be subject to change pending further revisions. Until the final specification
-// revision is finished, changes required to implement updates to the spec will
-// not be considered a breaking change. They will happen as a minor version
-// releases until the spec is final.
-func TimestampFromV6(u UUID) (Timestamp, error) {
- if u.Version() != 6 {
- return 0, fmt.Errorf("uuid: %s is version %d, not version 6", u, u.Version())
- }
-
- hi := binary.BigEndian.Uint32(u[0:4])
- mid := binary.BigEndian.Uint16(u[4:6])
- low := binary.BigEndian.Uint16(u[6:8]) & 0xfff
-
- return Timestamp(uint64(low) + (uint64(mid) << 12) + (uint64(hi) << 28)), nil
-}
-
-// Nil is the nil UUID, as specified in RFC-4122, that has all 128 bits set to
-// zero.
-var Nil = UUID{}
-
-// Predefined namespace UUIDs.
-var (
- NamespaceDNS = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
- NamespaceURL = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
- NamespaceOID = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
- NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
-)
-
-// IsNil returns if the UUID is equal to the nil UUID
-func (u UUID) IsNil() bool {
- return u == Nil
-}
-
-// Version returns the algorithm version used to generate the UUID.
-func (u UUID) Version() byte {
- return u[6] >> 4
-}
-
-// Variant returns the UUID layout variant.
-func (u UUID) Variant() byte {
- switch {
- case (u[8] >> 7) == 0x00:
- return VariantNCS
- case (u[8] >> 6) == 0x02:
- return VariantRFC4122
- case (u[8] >> 5) == 0x06:
- return VariantMicrosoft
- case (u[8] >> 5) == 0x07:
- fallthrough
- default:
- return VariantFuture
- }
-}
-
-// Bytes returns a byte slice representation of the UUID.
-func (u UUID) Bytes() []byte {
- return u[:]
-}
-
-// encodeCanonical encodes the canonical RFC-4122 form of UUID u into the
-// first 36 bytes dst.
-func encodeCanonical(dst []byte, u UUID) {
- const hextable = "0123456789abcdef"
- dst[8] = '-'
- dst[13] = '-'
- dst[18] = '-'
- dst[23] = '-'
- for i, x := range [16]byte{
- 0, 2, 4, 6,
- 9, 11,
- 14, 16,
- 19, 21,
- 24, 26, 28, 30, 32, 34,
- } {
- c := u[i]
- dst[x] = hextable[c>>4]
- dst[x+1] = hextable[c&0x0f]
- }
-}
-
-// String returns a canonical RFC-4122 string representation of the UUID:
-// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
-func (u UUID) String() string {
- var buf [36]byte
- encodeCanonical(buf[:], u)
- return string(buf[:])
-}
-
-// Format implements fmt.Formatter for UUID values.
-//
-// The behavior is as follows:
-// The 'x' and 'X' verbs output only the hex digits of the UUID, using a-f for 'x' and A-F for 'X'.
-// The 'v', '+v', 's' and 'q' verbs return the canonical RFC-4122 string representation.
-// The 'S' verb returns the RFC-4122 format, but with capital hex digits.
-// The '#v' verb returns the "Go syntax" representation, which is a 16 byte array initializer.
-// All other verbs not handled directly by the fmt package (like '%p') are unsupported and will return
-// "%!verb(uuid.UUID=value)" as recommended by the fmt package.
-func (u UUID) Format(f fmt.State, c rune) {
- if c == 'v' && f.Flag('#') {
- fmt.Fprintf(f, "%#v", [Size]byte(u))
- return
- }
- switch c {
- case 'x', 'X':
- b := make([]byte, 32)
- hex.Encode(b, u[:])
- if c == 'X' {
- toUpperHex(b)
- }
- _, _ = f.Write(b)
- case 'v', 's', 'S':
- b, _ := u.MarshalText()
- if c == 'S' {
- toUpperHex(b)
- }
- _, _ = f.Write(b)
- case 'q':
- b := make([]byte, 38)
- b[0] = '"'
- encodeCanonical(b[1:], u)
- b[37] = '"'
- _, _ = f.Write(b)
- default:
- // invalid/unsupported format verb
- fmt.Fprintf(f, "%%!%c(uuid.UUID=%s)", c, u.String())
- }
-}
-
-func toUpperHex(b []byte) {
- for i, c := range b {
- if 'a' <= c && c <= 'f' {
- b[i] = c - ('a' - 'A')
- }
- }
-}
-
-// SetVersion sets the version bits.
-func (u *UUID) SetVersion(v byte) {
- u[6] = (u[6] & 0x0f) | (v << 4)
-}
-
-// SetVariant sets the variant bits.
-func (u *UUID) SetVariant(v byte) {
- switch v {
- case VariantNCS:
- u[8] = (u[8]&(0xff>>1) | (0x00 << 7))
- case VariantRFC4122:
- u[8] = (u[8]&(0xff>>2) | (0x02 << 6))
- case VariantMicrosoft:
- u[8] = (u[8]&(0xff>>3) | (0x06 << 5))
- case VariantFuture:
- fallthrough
- default:
- u[8] = (u[8]&(0xff>>3) | (0x07 << 5))
- }
-}
-
-// Must is a helper that wraps a call to a function returning (UUID, error)
-// and panics if the error is non-nil. It is intended for use in variable
-// initializations such as
-//
-// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000"))
-func Must(u UUID, err error) UUID {
- if err != nil {
- panic(err)
- }
- return u
-}
diff --git a/vendor/github.com/pborman/uuid/.travis.yml b/vendor/github.com/pborman/uuid/.travis.yml
deleted file mode 100644
index 3deb4a1243..0000000000
--- a/vendor/github.com/pborman/uuid/.travis.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-language: go
-
-go:
- - "1.9"
- - "1.10"
- - "1.11"
- - tip
-
-script:
- - go test -v ./...
diff --git a/vendor/github.com/pborman/uuid/CONTRIBUTING.md b/vendor/github.com/pborman/uuid/CONTRIBUTING.md
deleted file mode 100644
index 04fdf09f13..0000000000
--- a/vendor/github.com/pborman/uuid/CONTRIBUTING.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# How to contribute
-
-We definitely welcome patches and contribution to this project!
-
-### Legal requirements
-
-In order to protect both you and ourselves, you will need to sign the
-[Contributor License Agreement](https://cla.developers.google.com/clas).
-
-You may have already signed it for other Google projects.
diff --git a/vendor/github.com/pborman/uuid/CONTRIBUTORS b/vendor/github.com/pborman/uuid/CONTRIBUTORS
deleted file mode 100644
index b382a04eda..0000000000
--- a/vendor/github.com/pborman/uuid/CONTRIBUTORS
+++ /dev/null
@@ -1 +0,0 @@
-Paul Borman
diff --git a/vendor/github.com/pborman/uuid/LICENSE b/vendor/github.com/pborman/uuid/LICENSE
deleted file mode 100644
index 5dc68268d9..0000000000
--- a/vendor/github.com/pborman/uuid/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009,2014 Google Inc. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pborman/uuid/README.md b/vendor/github.com/pborman/uuid/README.md
deleted file mode 100644
index 810ad40dc9..0000000000
--- a/vendor/github.com/pborman/uuid/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
-This project was automatically exported from code.google.com/p/go-uuid
-
-# uuid ![build status](https://travis-ci.org/pborman/uuid.svg?branch=master)
-The uuid package generates and inspects UUIDs based on [RFC 4122](http://tools.ietf.org/html/rfc4122) and DCE 1.1: Authentication and Security Services.
-
-This package now leverages the github.com/google/uuid package (which is based off an earlier version of this package).
-
-###### Install
-`go get github.com/pborman/uuid`
-
-###### Documentation
-[![GoDoc](https://godoc.org/github.com/pborman/uuid?status.svg)](http://godoc.org/github.com/pborman/uuid)
-
-Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here:
-http://godoc.org/github.com/pborman/uuid
diff --git a/vendor/github.com/pborman/uuid/dce.go b/vendor/github.com/pborman/uuid/dce.go
deleted file mode 100644
index 50a0f2d099..0000000000
--- a/vendor/github.com/pborman/uuid/dce.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
- "encoding/binary"
- "fmt"
- "os"
-)
-
-// A Domain represents a Version 2 domain
-type Domain byte
-
-// Domain constants for DCE Security (Version 2) UUIDs.
-const (
- Person = Domain(0)
- Group = Domain(1)
- Org = Domain(2)
-)
-
-// NewDCESecurity returns a DCE Security (Version 2) UUID.
-//
-// The domain should be one of Person, Group or Org.
-// On a POSIX system the id should be the users UID for the Person
-// domain and the users GID for the Group. The meaning of id for
-// the domain Org or on non-POSIX systems is site defined.
-//
-// For a given domain/id pair the same token may be returned for up to
-// 7 minutes and 10 seconds.
-func NewDCESecurity(domain Domain, id uint32) UUID {
- uuid := NewUUID()
- if uuid != nil {
- uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
- uuid[9] = byte(domain)
- binary.BigEndian.PutUint32(uuid[0:], id)
- }
- return uuid
-}
-
-// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
-// domain with the id returned by os.Getuid.
-//
-// NewDCEPerson(Person, uint32(os.Getuid()))
-func NewDCEPerson() UUID {
- return NewDCESecurity(Person, uint32(os.Getuid()))
-}
-
-// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
-// domain with the id returned by os.Getgid.
-//
-// NewDCEGroup(Group, uint32(os.Getgid()))
-func NewDCEGroup() UUID {
- return NewDCESecurity(Group, uint32(os.Getgid()))
-}
-
-// Domain returns the domain for a Version 2 UUID or false.
-func (uuid UUID) Domain() (Domain, bool) {
- if v, _ := uuid.Version(); v != 2 {
- return 0, false
- }
- return Domain(uuid[9]), true
-}
-
-// Id returns the id for a Version 2 UUID or false.
-func (uuid UUID) Id() (uint32, bool) {
- if v, _ := uuid.Version(); v != 2 {
- return 0, false
- }
- return binary.BigEndian.Uint32(uuid[0:4]), true
-}
-
-func (d Domain) String() string {
- switch d {
- case Person:
- return "Person"
- case Group:
- return "Group"
- case Org:
- return "Org"
- }
- return fmt.Sprintf("Domain%d", int(d))
-}
diff --git a/vendor/github.com/pborman/uuid/doc.go b/vendor/github.com/pborman/uuid/doc.go
deleted file mode 100644
index 727d761674..0000000000
--- a/vendor/github.com/pborman/uuid/doc.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// The uuid package generates and inspects UUIDs.
-//
-// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
-// Services.
-//
-// This package is a partial wrapper around the github.com/google/uuid package.
-// This package represents a UUID as []byte while github.com/google/uuid
-// represents a UUID as [16]byte.
-package uuid
diff --git a/vendor/github.com/pborman/uuid/hash.go b/vendor/github.com/pborman/uuid/hash.go
deleted file mode 100644
index a0420c1ef3..0000000000
--- a/vendor/github.com/pborman/uuid/hash.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
- "crypto/md5"
- "crypto/sha1"
- "hash"
-)
-
-// Well known Name Space IDs and UUIDs
-var (
- NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
- NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
- NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
- NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
- NIL = Parse("00000000-0000-0000-0000-000000000000")
-)
-
-// NewHash returns a new UUID derived from the hash of space concatenated with
-// data generated by h. The hash should be at least 16 byte in length. The
-// first 16 bytes of the hash are used to form the UUID. The version of the
-// UUID will be the lower 4 bits of version. NewHash is used to implement
-// NewMD5 and NewSHA1.
-func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
- h.Reset()
- h.Write(space)
- h.Write([]byte(data))
- s := h.Sum(nil)
- uuid := make([]byte, 16)
- copy(uuid, s)
- uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
- uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
- return uuid
-}
-
-// NewMD5 returns a new MD5 (Version 3) UUID based on the
-// supplied name space and data.
-//
-// NewHash(md5.New(), space, data, 3)
-func NewMD5(space UUID, data []byte) UUID {
- return NewHash(md5.New(), space, data, 3)
-}
-
-// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
-// supplied name space and data.
-//
-// NewHash(sha1.New(), space, data, 5)
-func NewSHA1(space UUID, data []byte) UUID {
- return NewHash(sha1.New(), space, data, 5)
-}
diff --git a/vendor/github.com/pborman/uuid/marshal.go b/vendor/github.com/pborman/uuid/marshal.go
deleted file mode 100644
index 35b89352ad..0000000000
--- a/vendor/github.com/pborman/uuid/marshal.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2016 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
- "errors"
- "fmt"
-
- guuid "github.com/google/uuid"
-)
-
-// MarshalText implements encoding.TextMarshaler.
-func (u UUID) MarshalText() ([]byte, error) {
- if len(u) != 16 {
- return nil, nil
- }
- var js [36]byte
- encodeHex(js[:], u)
- return js[:], nil
-}
-
-// UnmarshalText implements encoding.TextUnmarshaler.
-func (u *UUID) UnmarshalText(data []byte) error {
- if len(data) == 0 {
- return nil
- }
- id := Parse(string(data))
- if id == nil {
- return errors.New("invalid UUID")
- }
- *u = id
- return nil
-}
-
-// MarshalBinary implements encoding.BinaryMarshaler.
-func (u UUID) MarshalBinary() ([]byte, error) {
- return u[:], nil
-}
-
-// UnmarshalBinary implements encoding.BinaryUnmarshaler.
-func (u *UUID) UnmarshalBinary(data []byte) error {
- if len(data) == 0 {
- return nil
- }
- if len(data) != 16 {
- return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
- }
- var id [16]byte
- copy(id[:], data)
- *u = id[:]
- return nil
-}
-
-// MarshalText implements encoding.TextMarshaler.
-func (u Array) MarshalText() ([]byte, error) {
- var js [36]byte
- encodeHex(js[:], u[:])
- return js[:], nil
-}
-
-// UnmarshalText implements encoding.TextUnmarshaler.
-func (u *Array) UnmarshalText(data []byte) error {
- id, err := guuid.ParseBytes(data)
- if err != nil {
- return err
- }
- *u = Array(id)
- return nil
-}
-
-// MarshalBinary implements encoding.BinaryMarshaler.
-func (u Array) MarshalBinary() ([]byte, error) {
- return u[:], nil
-}
-
-// UnmarshalBinary implements encoding.BinaryUnmarshaler.
-func (u *Array) UnmarshalBinary(data []byte) error {
- if len(data) != 16 {
- return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
- }
- copy(u[:], data)
- return nil
-}
diff --git a/vendor/github.com/pborman/uuid/node.go b/vendor/github.com/pborman/uuid/node.go
deleted file mode 100644
index e524e0101b..0000000000
--- a/vendor/github.com/pborman/uuid/node.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
- guuid "github.com/google/uuid"
-)
-
-// NodeInterface returns the name of the interface from which the NodeID was
-// derived. The interface "user" is returned if the NodeID was set by
-// SetNodeID.
-func NodeInterface() string {
- return guuid.NodeInterface()
-}
-
-// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
-// If name is "" then the first usable interface found will be used or a random
-// Node ID will be generated. If a named interface cannot be found then false
-// is returned.
-//
-// SetNodeInterface never fails when name is "".
-func SetNodeInterface(name string) bool {
- return guuid.SetNodeInterface(name)
-}
-
-// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
-// if not already set.
-func NodeID() []byte {
- return guuid.NodeID()
-}
-
-// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
-// of id are used. If id is less than 6 bytes then false is returned and the
-// Node ID is not set.
-func SetNodeID(id []byte) bool {
- return guuid.SetNodeID(id)
-}
-
-// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
-// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
-func (uuid UUID) NodeID() []byte {
- if len(uuid) != 16 {
- return nil
- }
- node := make([]byte, 6)
- copy(node, uuid[10:])
- return node
-}
diff --git a/vendor/github.com/pborman/uuid/sql.go b/vendor/github.com/pborman/uuid/sql.go
deleted file mode 100644
index 929c3847e2..0000000000
--- a/vendor/github.com/pborman/uuid/sql.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
- "database/sql/driver"
- "errors"
- "fmt"
-)
-
-// Scan implements sql.Scanner so UUIDs can be read from databases transparently
-// Currently, database types that map to string and []byte are supported. Please
-// consult database-specific driver documentation for matching types.
-func (uuid *UUID) Scan(src interface{}) error {
- switch src.(type) {
- case string:
- // if an empty UUID comes from a table, we return a null UUID
- if src.(string) == "" {
- return nil
- }
-
- // see uuid.Parse for required string format
- parsed := Parse(src.(string))
-
- if parsed == nil {
- return errors.New("Scan: invalid UUID format")
- }
-
- *uuid = parsed
- case []byte:
- b := src.([]byte)
-
- // if an empty UUID comes from a table, we return a null UUID
- if len(b) == 0 {
- return nil
- }
-
- // assumes a simple slice of bytes if 16 bytes
- // otherwise attempts to parse
- if len(b) == 16 {
- parsed := make([]byte, 16)
- copy(parsed, b)
- *uuid = UUID(parsed)
- } else {
- u := Parse(string(b))
-
- if u == nil {
- return errors.New("Scan: invalid UUID format")
- }
-
- *uuid = u
- }
-
- default:
- return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
- }
-
- return nil
-}
-
-// Value implements sql.Valuer so that UUIDs can be written to databases
-// transparently. Currently, UUIDs map to strings. Please consult
-// database-specific driver documentation for matching types.
-func (uuid UUID) Value() (driver.Value, error) {
- return uuid.String(), nil
-}
diff --git a/vendor/github.com/pborman/uuid/time.go b/vendor/github.com/pborman/uuid/time.go
deleted file mode 100644
index 5c0960d872..0000000000
--- a/vendor/github.com/pborman/uuid/time.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
- "encoding/binary"
-
- guuid "github.com/google/uuid"
-)
-
-// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
-// 1582.
-type Time = guuid.Time
-
-// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
-// clock sequence as well as adjusting the clock sequence as needed. An error
-// is returned if the current time cannot be determined.
-func GetTime() (Time, uint16, error) { return guuid.GetTime() }
-
-// ClockSequence returns the current clock sequence, generating one if not
-// already set. The clock sequence is only used for Version 1 UUIDs.
-//
-// The uuid package does not use global static storage for the clock sequence or
-// the last time a UUID was generated. Unless SetClockSequence a new random
-// clock sequence is generated the first time a clock sequence is requested by
-// ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) sequence is generated
-// for
-func ClockSequence() int { return guuid.ClockSequence() }
-
-// SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to
-// -1 causes a new sequence to be generated.
-func SetClockSequence(seq int) { guuid.SetClockSequence(seq) }
-
-// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
-// uuid. It returns false if uuid is not valid. The time is only well defined
-// for version 1 and 2 UUIDs.
-func (uuid UUID) Time() (Time, bool) {
- if len(uuid) != 16 {
- return 0, false
- }
- time := int64(binary.BigEndian.Uint32(uuid[0:4]))
- time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
- time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
- return Time(time), true
-}
-
-// ClockSequence returns the clock sequence encoded in uuid. It returns false
-// if uuid is not valid. The clock sequence is only well defined for version 1
-// and 2 UUIDs.
-func (uuid UUID) ClockSequence() (int, bool) {
- if len(uuid) != 16 {
- return 0, false
- }
- return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true
-}
diff --git a/vendor/github.com/pborman/uuid/util.go b/vendor/github.com/pborman/uuid/util.go
deleted file mode 100644
index 255b5e2485..0000000000
--- a/vendor/github.com/pborman/uuid/util.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-// xvalues returns the value of a byte as a hexadecimal digit or 255.
-var xvalues = [256]byte{
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
- 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-}
-
-// xtob converts the the first two hex bytes of x into a byte.
-func xtob(x string) (byte, bool) {
- b1 := xvalues[x[0]]
- b2 := xvalues[x[1]]
- return (b1 << 4) | b2, b1 != 255 && b2 != 255
-}
diff --git a/vendor/github.com/pborman/uuid/uuid.go b/vendor/github.com/pborman/uuid/uuid.go
deleted file mode 100644
index 3370004207..0000000000
--- a/vendor/github.com/pborman/uuid/uuid.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
- "bytes"
- "crypto/rand"
- "encoding/hex"
- "io"
-
- guuid "github.com/google/uuid"
-)
-
-// Array is a pass-by-value UUID that can be used as an effecient key in a map.
-type Array [16]byte
-
-// UUID converts uuid into a slice.
-func (uuid Array) UUID() UUID {
- return uuid[:]
-}
-
-// String returns the string representation of uuid,
-// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
-func (uuid Array) String() string {
- return guuid.UUID(uuid).String()
-}
-
-// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
-// 4122.
-type UUID []byte
-
-// A Version represents a UUIDs version.
-type Version = guuid.Version
-
-// A Variant represents a UUIDs variant.
-type Variant = guuid.Variant
-
-// Constants returned by Variant.
-const (
- Invalid = guuid.Invalid // Invalid UUID
- RFC4122 = guuid.RFC4122 // The variant specified in RFC4122
- Reserved = guuid.Reserved // Reserved, NCS backward compatibility.
- Microsoft = guuid.Microsoft // Reserved, Microsoft Corporation backward compatibility.
- Future = guuid.Future // Reserved for future definition.
-)
-
-var rander = rand.Reader // random function
-
-// New returns a new random (version 4) UUID as a string. It is a convenience
-// function for NewRandom().String().
-func New() string {
- return NewRandom().String()
-}
-
-// Parse decodes s into a UUID or returns nil. See github.com/google/uuid for
-// the formats parsed.
-func Parse(s string) UUID {
- gu, err := guuid.Parse(s)
- if err == nil {
- return gu[:]
- }
- return nil
-}
-
-// ParseBytes is like Parse, except it parses a byte slice instead of a string.
-func ParseBytes(b []byte) (UUID, error) {
- gu, err := guuid.ParseBytes(b)
- if err == nil {
- return gu[:], nil
- }
- return nil, err
-}
-
-// Equal returns true if uuid1 and uuid2 are equal.
-func Equal(uuid1, uuid2 UUID) bool {
- return bytes.Equal(uuid1, uuid2)
-}
-
-// Array returns an array representation of uuid that can be used as a map key.
-// Array panics if uuid is not valid.
-func (uuid UUID) Array() Array {
- if len(uuid) != 16 {
- panic("invalid uuid")
- }
- var a Array
- copy(a[:], uuid)
- return a
-}
-
-// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
-// , or "" if uuid is invalid.
-func (uuid UUID) String() string {
- if len(uuid) != 16 {
- return ""
- }
- var buf [36]byte
- encodeHex(buf[:], uuid)
- return string(buf[:])
-}
-
-// URN returns the RFC 2141 URN form of uuid,
-// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
-func (uuid UUID) URN() string {
- if len(uuid) != 16 {
- return ""
- }
- var buf [36 + 9]byte
- copy(buf[:], "urn:uuid:")
- encodeHex(buf[9:], uuid)
- return string(buf[:])
-}
-
-func encodeHex(dst []byte, uuid UUID) {
- hex.Encode(dst[:], uuid[:4])
- dst[8] = '-'
- hex.Encode(dst[9:13], uuid[4:6])
- dst[13] = '-'
- hex.Encode(dst[14:18], uuid[6:8])
- dst[18] = '-'
- hex.Encode(dst[19:23], uuid[8:10])
- dst[23] = '-'
- hex.Encode(dst[24:], uuid[10:])
-}
-
-// Variant returns the variant encoded in uuid. It returns Invalid if
-// uuid is invalid.
-func (uuid UUID) Variant() Variant {
- if len(uuid) != 16 {
- return Invalid
- }
- switch {
- case (uuid[8] & 0xc0) == 0x80:
- return RFC4122
- case (uuid[8] & 0xe0) == 0xc0:
- return Microsoft
- case (uuid[8] & 0xe0) == 0xe0:
- return Future
- default:
- return Reserved
- }
-}
-
-// Version returns the version of uuid. It returns false if uuid is not
-// valid.
-func (uuid UUID) Version() (Version, bool) {
- if len(uuid) != 16 {
- return 0, false
- }
- return Version(uuid[6] >> 4), true
-}
-
-// SetRand sets the random number generator to r, which implements io.Reader.
-// If r.Read returns an error when the package requests random data then
-// a panic will be issued.
-//
-// Calling SetRand with nil sets the random number generator to the default
-// generator.
-func SetRand(r io.Reader) {
- guuid.SetRand(r)
-}
diff --git a/vendor/github.com/pborman/uuid/version1.go b/vendor/github.com/pborman/uuid/version1.go
deleted file mode 100644
index 7af948da79..0000000000
--- a/vendor/github.com/pborman/uuid/version1.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
- guuid "github.com/google/uuid"
-)
-
-// NewUUID returns a Version 1 UUID based on the current NodeID and clock
-// sequence, and the current time. If the NodeID has not been set by SetNodeID
-// or SetNodeInterface then it will be set automatically. If the NodeID cannot
-// be set NewUUID returns nil. If clock sequence has not been set by
-// SetClockSequence then it will be set automatically. If GetTime fails to
-// return the current NewUUID returns nil.
-func NewUUID() UUID {
- gu, err := guuid.NewUUID()
- if err == nil {
- return UUID(gu[:])
- }
- return nil
-}
diff --git a/vendor/github.com/pborman/uuid/version4.go b/vendor/github.com/pborman/uuid/version4.go
deleted file mode 100644
index b459d46d13..0000000000
--- a/vendor/github.com/pborman/uuid/version4.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import guuid "github.com/google/uuid"
-
-// Random returns a Random (Version 4) UUID or panics.
-//
-// The strength of the UUIDs is based on the strength of the crypto/rand
-// package.
-//
-// A note about uniqueness derived from the UUID Wikipedia entry:
-//
-// Randomly generated UUIDs have 122 random bits. One's annual risk of being
-// hit by a meteorite is estimated to be one chance in 17 billion, that
-// means the probability is about 0.00000000006 (6 × 10−11),
-// equivalent to the odds of creating a few tens of trillions of UUIDs in a
-// year and having one duplicate.
-func NewRandom() UUID {
- if gu, err := guuid.NewRandom(); err == nil {
- return UUID(gu[:])
- }
- return nil
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 3e0bb53e99..24975773db 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -8,7 +8,6 @@ github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-10-01/cont
github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network
github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources
github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage
-github.com/Azure/azure-sdk-for-go/storage
github.com/Azure/azure-sdk-for-go/version
# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0
## explicit; go 1.18
@@ -223,9 +222,6 @@ github.com/go-openapi/swag
# github.com/go-task/slim-sprig/v3 v3.0.0
## explicit; go 1.20
github.com/go-task/slim-sprig/v3
-# github.com/gofrs/uuid v4.4.0+incompatible
-## explicit
-github.com/gofrs/uuid
# github.com/gogo/protobuf v1.3.2
## explicit; go 1.15
github.com/gogo/protobuf/gogoproto
@@ -434,9 +430,6 @@ github.com/opencontainers/go-digest
github.com/opencontainers/selinux/go-selinux
github.com/opencontainers/selinux/go-selinux/label
github.com/opencontainers/selinux/pkg/pwalkdir
-# github.com/pborman/uuid v1.2.0
-## explicit
-github.com/pborman/uuid
# github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c
## explicit; go 1.14
github.com/pkg/browser